diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index e3001ee197..a2f994643b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,5 @@ blank_issues_enabled: false contact_links: - - name: Discord - url: https://per.co.na/discord - about: Please join our discord for quick questions. - name: Forum url: https://forums.percona.com/ about: Please join our forums for general questions ans discussions. diff --git a/.github/workflows/admin.yml b/.github/workflows/admin.yml index 3950ddd73f..64d05ed1ca 100644 --- a/.github/workflows/admin.yml +++ b/.github/workflows/admin.yml @@ -1,4 +1,4 @@ -name: Admin +name: 'Client: pmm and pmm-admin' on: push: @@ -18,25 +18,12 @@ on: - "update/**" - "vmproxy/**" - workflow_call: - inputs: - go-version: - description: An array of Go versions to be tested against, in a format of json string. - required: false - type: string - default: '["tip"]' - jobs: test: name: Tests runs-on: ubuntu-22.04 strategy: fail-fast: false - matrix: - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} - - env: - GO_VERSION: ${{ matrix.go-version }} defaults: run: @@ -47,31 +34,29 @@ jobs: uses: actions/checkout@v3 - name: Set up Go release - if: env.GO_VERSION != 'tip' uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: ${{ github.workspace }}/go.mod + cache: false - - name: Set up Go tip - if: env.GO_VERSION == 'tip' - run: | - git clone --depth=1 https://go.googlesource.com/go $HOME/gotip - cd $HOME/gotip/src - ./make.bash - echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV - echo "$HOME/gotip/bin" >> $GITHUB_PATH - echo "$GOBIN" >> $GITHUB_PATH + - name: Enable Go build cache + uses: actions/cache@v3 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- - name: Enable Go modules cache uses: 
actions/cache@v3 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules- + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go-modules- - name: Download Go modules - run: go mod download + run: go mod download -x - name: Build and install run: make install @@ -101,10 +86,7 @@ jobs: strategy: fail-fast: false matrix: - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} - - env: - GO_VERSION: ${{ matrix.go-version }} + test-type: [ pmm-common, pmm-server-install, pmm-docker-test ] defaults: run: @@ -115,43 +97,66 @@ jobs: uses: actions/checkout@v3 - name: Set up Go release - if: env.GO_VERSION != 'tip' uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: ${{ github.workspace }}/go.mod + cache: false - - name: Set up Go tip - if: env.GO_VERSION == 'tip' - run: | - git clone --depth=1 https://go.googlesource.com/go $HOME/gotip - cd $HOME/gotip/src - ./make.bash - echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV - echo "$HOME/gotip/bin" >> $GITHUB_PATH - echo "$GOBIN" >> $GITHUB_PATH + - name: Enable Go build cache + uses: actions/cache@v3 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- - name: Enable Go modules cache uses: actions/cache@v3 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules- + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go-modules- - name: Download Go modules - run: go mod download + run: go mod download -x - name: Build and install run: make -C ../admin install + - name: Setup tools + run: | + sudo apt-get install 
-y wget jq + sudo ln -sf /home/runner/go/bin/pmm /usr/bin + sudo chown -R runner:docker /usr/bin/pmm + - name: Install Playwright run: | npm ci npx playwright install - npm install -D @playwright/test - - name: Run tests - run: npx playwright test --reporter=list tests/pmm-cli/ + - name: Run "pmm" common tests + if: ${{ matrix.test-type == 'pmm-common' }} + run: npx playwright test tests/pmm-cli/pmm.spec.ts + + - name: Run "pmm server install" tests + if: ${{ matrix.test-type == 'pmm-server-install' }} + run: npx playwright test tests/pmm-cli/server/install.spec.ts + + - name: Run "pmm server upgrade" tests + if: ${{ matrix.test-type == 'pmm-server-update' }} + run: npx playwright test tests/pmm-cli/server/upgrade.spec.ts + + - name: Run specific docker tests + if: ${{ matrix.test-type == 'pmm-docker-test' }} + run: npx playwright test tests/pmm-cli/server/docker-specific.spec.ts + + - name: Attach the report on failure + if: failure() + uses: actions/upload-artifact@v3 + with: + name: "report-${{ matrix.go-version }}-${{ matrix.test-type }}" + path: ${{ github.workspace }}/cli-tests/playwright-report/ - name: Run debug commands on failure if: ${{ failure() }} diff --git a/.github/workflows/agent.yml b/.github/workflows/agent.yml index d187ea00cb..8edef91181 100644 --- a/.github/workflows/agent.yml +++ b/.github/workflows/agent.yml @@ -1,4 +1,4 @@ -name: Agent +name: 'Client: pmm-agent' on: push: @@ -19,23 +19,12 @@ on: - "update/**" - "vmproxy/**" - workflow_call: - inputs: - go-version: - description: An array of Go versions to be tested against, in a format of json string. 
- required: false - type: string - default: '["tip"]' - jobs: test: name: Tests runs-on: ubuntu-22.04 strategy: matrix: - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} - may-fail: [ true ] - images: - { mysql: 'mysql:5.6', mongo: 'mongo:4.2', postgres: 'postgres:10', pmm_server: 'percona/pmm-server:2.0.0' } - { mysql: 'mysql:5.7', mongo: 'mongo:4.4', postgres: 'postgres:11', pmm_server: 'percona/pmm-server:2.0.1' } @@ -59,10 +48,9 @@ jobs: - { mysql: 'mariadb:10.3', mongo: 'percona/percona-server-mongodb:4.4', postgres: 'postgres:9.5', pmm_server: 'perconalab/pmm-server:dev-latest' } - { mysql: 'mariadb:10.4', postgres: 'postgres:9.6', pmm_server: 'perconalab/pmm-server:dev-latest' } - continue-on-error: ${{ matrix.may-fail }} + continue-on-error: true env: - GO_VERSION: ${{ matrix.go-version }} MYSQL_IMAGE: ${{ matrix.images.mysql }} MONGO_IMAGE: ${{ matrix.images.mongo }} POSTGRES_IMAGE: ${{ matrix.images.postgres }} @@ -78,32 +66,29 @@ jobs: uses: actions/checkout@v3 - name: Set up Go release - if: env.GO_VERSION != 'tip' uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: ${{ github.workspace }}/go.mod + cache: false - - name: Set up Go tip - if: env.GO_VERSION == 'tip' - run: | - git clone --depth=1 https://go.googlesource.com/go $HOME/gotip - cd $HOME/gotip/src - ./make.bash - echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV - echo "$HOME/gotip/bin" >> $GITHUB_PATH + - name: Enable Go build cache + uses: actions/cache@v3 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- - name: Enable Go modules cache uses: actions/cache@v3 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules- + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} 
+ restore-keys: ${{ runner.os }}-go-modules- - name: Download Go modules - run: | - pushd ../tools && go mod download - popd && go mod download + run: go mod download -x - name: Build and install run: make install @@ -121,7 +106,7 @@ jobs: with: file: cover.out flags: agent - env_vars: GO_VERSION,MYSQL_IMAGE,MONGO_IMAGE,POSTGRES_IMAGE,PMM_SERVER_IMAGE + env_vars: MYSQL_IMAGE,MONGO_IMAGE,POSTGRES_IMAGE,PMM_SERVER_IMAGE fail_ci_if_error: false - name: Run debug commands on failure diff --git a/.github/workflows/go-tip-checks.yml b/.github/workflows/go-tip-checks.yml deleted file mode 100644 index 32c255df96..0000000000 --- a/.github/workflows/go-tip-checks.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Go-tip - -on: - schedule: - # Every Sunday at 13:00 - - cron: "0 13 * * 0" - -jobs: - common: - uses: percona/pmm/.github/workflows/common.yml@main - - admin: - uses: percona/pmm/.github/workflows/admin.yml@main - - agent: - uses: percona/pmm/.github/workflows/agent.yml@main - - api: - uses: percona/pmm/.github/workflows/api.yml@main - - managed: - uses: percona/pmm/.github/workflows/managed.yml@main diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 83ca07e205..dc57749d36 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -10,58 +10,44 @@ on: pull_request: - workflow_call: - inputs: - go-version: - description: An array of Go versions to be tested against, in a format of json string. 
- required: false - type: string - default: '["tip"]' - jobs: check: name: Checks runs-on: ubuntu-22.04 strategy: fail-fast: false - matrix: - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} - may-fail: [ false ] - - env: - GO_VERSION: ${{ matrix.go-version }} steps: - name: Check out code uses: actions/checkout@v3 - name: Set up Go release - if: env.GO_VERSION != 'tip' uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: ${{ github.workspace }}/go.mod + cache: false - - name: Set up Go tip - if: env.GO_VERSION == 'tip' - run: | - git clone --depth=1 https://go.googlesource.com/go $HOME/gotip - cd $HOME/gotip/src - ./make.bash - echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV - echo "$HOME/gotip/bin" >> $GITHUB_PATH + - name: Enable Go build cache + uses: actions/cache@v3 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- - name: Enable Go modules cache uses: actions/cache@v3 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules- + ${{ runner.os }}-go-modules- - name: Download Go modules run: | - pushd tools && go mod download - popd && go mod download + pushd tools && go mod download -x + popd && go mod download -x - name: Install development tools run: make init @@ -98,7 +84,7 @@ jobs: uses: reviewdog/action-golangci-lint@v2 with: github_token: ${{ secrets.ROBOT_TOKEN || secrets.GITHUB_TOKEN }} - go_version: ${{ env.GO_VERSION }} + go_version_file: ${{ github.workspace }}/go.mod reporter: github-pr-review fail_on_error: true cache: false diff --git a/.github/workflows/managed.yml b/.github/workflows/managed.yml index 263d80a9d3..ac69ccc5cd 100644 --- a/.github/workflows/managed.yml +++ 
b/.github/workflows/managed.yml @@ -1,9 +1,5 @@ name: Managed on: - schedule: - # run every Sunday to re-populate caches after they are cleaned on Saturday - - cron: "0 12 * * 0" - push: branches: - main @@ -22,14 +18,6 @@ on: - "update/**" - "vmproxy/**" - workflow_call: - inputs: - go-version: - description: An array of Go versions to be tested against, in a format of json string. - required: false - type: string - default: '["tip"]' - jobs: test: name: Tests @@ -37,57 +25,53 @@ jobs: timeout-minutes: 30 strategy: fail-fast: false - matrix: - use-cache: [ false ] - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} env: - GO_VERSION: ${{ matrix.go-version }} PMM_SERVER_IMAGE: perconalab/pmm-server:dev-latest AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }} AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }} OAUTH_PMM_CLIENT_ID: ${{ secrets.OAUTH_PMM_CLIENT_ID }} OAUTH_PMM_CLIENT_SECRET: ${{ secrets.OAUTH_PMM_CLIENT_SECRET }} + DEVCONTAINER_CACHE_ENABLED: false steps: - name: Check out code uses: actions/checkout@v3 - name: Enable Go build cache - if: matrix.use-cache + if: ${{ fromJSON(env.DEVCONTAINER_CACHE_ENABLED) }} uses: actions/cache@v3 with: path: ~/.cache/go-build - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-build-${{ github.ref }}-${{ hashFiles('**') }} + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-build-${{ github.ref }}- - ${{ runner.os }}-go-${{ env.GO_VERSION }}-build- + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- - name: Enable Go modules cache - if: matrix.use-cache + if: ${{ fromJSON(env.DEVCONTAINER_CACHE_ENABLED) }} uses: actions/cache@v3 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules- + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os 
}}-go-modules- - - name: Download Go modules - if: matrix.use-cache + - name: Download tools + if: ${{ fromJSON(env.DEVCONTAINER_CACHE_ENABLED) }} run: | - pushd tools && go mod download - popd && go mod download + pushd tools && go mod download -x + popd && go mod download -x - name: Initialize CI environment run: make env-compose-up - name: Restore Go build cache - if: matrix.use-cache + if: ${{ fromJSON(env.DEVCONTAINER_CACHE_ENABLED) }} continue-on-error: true run: docker cp ~/.cache/go-build pmm-managed-server:/root/.cache/go-build - name: Restore Go modules cache - if: matrix.use-cache + if: ${{ fromJSON(env.DEVCONTAINER_CACHE_ENABLED) }} continue-on-error: true run: docker cp ~/go/pkg/mod pmm-managed-server:/root/go/pkg/mod @@ -105,11 +89,11 @@ jobs: with: file: managed/cover.out flags: managed - env_vars: GO_VERSION,PMM_SERVER_IMAGE + env_vars: PMM_SERVER_IMAGE fail_ci_if_error: false - name: Cache - if: matrix.use-cache + if: ${{ fromJSON(env.DEVCONTAINER_CACHE_ENABLED) }} run: | docker exec pmm-managed-server go clean -testcache docker exec --workdir=/root/go/src/github.com/percona/pmm/managed pmm-managed-server find . -type d -name fuzzdata -exec rm -r {} + diff --git a/.github/workflows/qan-api2.yml b/.github/workflows/qan-api2.yml index bbc6631c3f..c16b984bd2 100644 --- a/.github/workflows/qan-api2.yml +++ b/.github/workflows/qan-api2.yml @@ -19,25 +19,12 @@ on: - "update/**" - "vmproxy/**" - workflow_call: - inputs: - go-version: - description: An array of Go versions to be tested against, in a format of json string. 
- required: false - type: string - default: '["tip"]' - jobs: test: name: Tests runs-on: ubuntu-22.04 strategy: fail-fast: false - matrix: - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} - - env: - GO_VERSION: ${{ matrix.go-version }} defaults: run: @@ -48,25 +35,29 @@ jobs: uses: actions/checkout@v3 - name: Set up Go release - if: env.GO_VERSION != 'tip' uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: ${{ github.workspace }}/go.mod + cache: false - - name: Set up Go tip - if: env.GO_VERSION == 'tip' - env: - # to avoid error due to `go version` accepting -v flag with an argument since 1.15 - GOFLAGS: "" - run: | - git clone --depth=1 https://go.googlesource.com/go $HOME/gotip - cd $HOME/gotip/src - ./make.bash - echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV - echo "$HOME/gotip/bin" >> $GITHUB_PATH + - name: Enable Go build cache + uses: actions/cache@v3 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- + + - name: Enable Go modules cache + uses: actions/cache@v3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go-modules- - name: Download Go modules - run: go mod download + run: go mod download -x - name: Build and install run: make install diff --git a/.github/workflows/sbom-assets.yml b/.github/workflows/sbom-assets.yml deleted file mode 100644 index b045120773..0000000000 --- a/.github/workflows/sbom-assets.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Generate an SBOM file -on: - push: - tags: - - v[0-9]+.[0-9]+.[0-9]+* - -jobs: - vmproxy: - runs-on: ubuntu-22.04 - steps: - - name: Create vmproxy SBOM - uses: anchore/sbom-action@v0 - with: - path: ./vmproxy - artifact-name: vmproxy-sbom.spdx - - - name: Publish vmproxy SBOM - uses: anchore/sbom-action/publish-sbom@v0 - with: - 
sbom-artifact-match: ".*\\.spdx$" diff --git a/.github/workflows/sbom.yml b/.github/workflows/sbom.yml index 622009b1f9..d04acadc5e 100644 --- a/.github/workflows/sbom.yml +++ b/.github/workflows/sbom.yml @@ -7,21 +7,35 @@ on: jobs: sbom: - name: Create and publish SBOM runs-on: ubuntu-22.04 - steps: - name: Check out code uses: actions/checkout@v3 - - name: Create SBOM assets + - name: Create SBOM for PMM uses: anchore/sbom-action@v0 with: file: go.mod artifact-name: pmm.spdx.json - - name: Publish SBOM assets + - name: Publish SBOM for PMM uses: anchore/sbom-action/publish-sbom@v0 with: sbom-artifact-match: ".*\\.spdx\\.json$" + vmproxy: + runs-on: ubuntu-22.04 + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Create SBOM for vmproxy + uses: anchore/sbom-action@v0 + with: + path: ./vmproxy + artifact-name: vmproxy.spdx.json + + - name: Publish SBOM for vmproxy + uses: anchore/sbom-action/publish-sbom@v0 + with: + sbom-artifact-match: ".*\\.spdx\\.json$" diff --git a/.github/workflows/vmproxy.yml b/.github/workflows/vmproxy.yml index 0064efda40..5bbbb6196d 100644 --- a/.github/workflows/vmproxy.yml +++ b/.github/workflows/vmproxy.yml @@ -19,25 +19,12 @@ on: - "qan-api2/**" - "update/**" - workflow_call: - inputs: - go-version: - description: An array of Go versions to be tested against, in a format of json string. 
- required: false - type: string - default: '["tip"]' - jobs: test: name: Tests runs-on: ubuntu-22.04 strategy: fail-fast: false - matrix: - go-version: ${{ fromJson(inputs.go-version || '["1.20.x"]') }} - - env: - GO_VERSION: ${{ matrix.go-version }} defaults: run: @@ -48,31 +35,30 @@ jobs: uses: actions/checkout@v3 - name: Set up Go release - if: env.GO_VERSION != 'tip' uses: actions/setup-go@v4 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: ${{ github.workspace }}/go.mod + cache: false - - name: Set up Go tip - if: env.GO_VERSION == 'tip' - run: | - git clone --depth=1 https://go.googlesource.com/go $HOME/gotip - cd $HOME/gotip/src - ./make.bash - echo "GOROOT=$HOME/gotip" >> $GITHUB_ENV - echo "$HOME/gotip/bin" >> $GITHUB_PATH - echo "$GOBIN" >> $GITHUB_PATH + - name: Enable Go build cache + uses: actions/cache@v3 + with: + path: ~/.cache/go-build + key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }} + restore-keys: | + ${{ runner.os }}-go-build-${{ github.ref }}- + ${{ runner.os }}-go-build- - name: Enable Go modules cache uses: actions/cache@v3 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-go-${{ env.GO_VERSION }}-modules- + ${{ runner.os }}-go-modules- - name: Download Go modules - run: go mod download + run: go mod download -x - name: Build and install run: make install diff --git a/.golangci.yml b/.golangci.yml index 85fa2c154a..d500a8e1a0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,6 +2,7 @@ linters-settings: exhaustive: default-signifies-exhaustive: true + cyclop: max-complexity: 30 @@ -49,6 +50,9 @@ linters-settings: line-length: 170 tab-width: 4 + nestif: + min-complexity: 7 + tagliatelle: # Check the struck tag name case. 
case: @@ -71,6 +75,7 @@ linters: enable-all: true disable: # keep the rules sorted alpahbetically + - execinquery # false positives only - exhaustivestruct # too annoying - exhaustruct # too many files to fix/nolint - deadcode # unmaintained, we leverage `unused` @@ -79,6 +84,7 @@ linters: - gochecknoinits # we use init functions - gocyclo # using cyclop with the max 30 instead - goerr113 # extra work & poor benefit + - golint # unmaintained, replaced by revive - gomnd # we are using numbers in many cases - gomoddirectives # we use replace directives - ifshort # a lot of false positives @@ -105,16 +111,9 @@ linters: - ireturn - gocognit - maintidx - - nestif - - errorlint - - forcetypeassert - - golint - - nonamedreturns - - execinquery - interfacebloat - gosimple - contextcheck - - nakedret - forbidigo - errcheck - dupl @@ -141,10 +140,11 @@ issues: linters: # keep sorted - exhaustivestruct # very annoying + - forcetypeassert # for tests' brevity sake - funlen # tests may be long - gocognit # triggered by subtests - gomnd # tests are full of magic numbers + - lll # tests often require long lines + - nonamedreturns # it's not critical for tests, albeit desirable - testpackage # senseless - unused # very annoying false positive: https://github.com/golangci/golangci-lint/issues/791 - - lll # tests often require long lines - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 14b2f76e34..6efdba02c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -153,7 +153,7 @@ The first one is a Unit testing, so we have unit tests in each repository mentio ### API tests -API tests are included into pmm repository and located in [api-tests directory](https://github.com/percona/pmm/managed/tree/main/api-tests). API tests runs against running PMM Server container. +API tests are included into pmm repository and located in [api-tests directory](https://github.com/percona/pmm/tree/main/api-tests). API tests runs against running PMM Server container. 
### End to End (E2E) tests @@ -221,4 +221,4 @@ For more efficient review process we use a mixed approach: Once your pull request is merged, you are an official Percona Community Contributor. Welcome to the community! -We're looking forward to your contributions and hope to hear from you soon on our [Forums](https://forums.percona.com) and [Discord](https://discord.gg/mQEyGPkNbR). +We're looking forward to your contributions and hope to hear from you soon on our [Forums](https://forums.percona.com). diff --git a/Makefile.include b/Makefile.include index 55d8d48f6d..f7f6af8a9f 100644 --- a/Makefile.include +++ b/Makefile.include @@ -47,7 +47,7 @@ gen-api: ## Generate PMM API bin/buf generate -v api - for API in api/agentlocalpb api/serverpb api/inventorypb api/managementpb api/managementpb/dbaas api/managementpb/ia api/managementpb/alerting api/managementpb/backup api/managementpb/azure api/managementpb/role api/qanpb api/managementpb/agent api/managementpb/service api/platformpb api/userpb api/onboardingpb; do \ + for API in api/agentlocalpb api/serverpb api/inventorypb api/managementpb api/managementpb/dbaas api/managementpb/ia api/managementpb/alerting api/managementpb/backup api/managementpb/azure api/managementpb/role api/qanpb api/managementpb/agent api/managementpb/node api/managementpb/service api/platformpb api/userpb api/onboardingpb; do \ set -x ; \ bin/swagger mixin $$API/json/header.json $$API/*.swagger.json --output=$$API/json/$$(basename $$API).json --keep-spec-order; \ bin/swagger flatten --with-flatten=expand --with-flatten=remove-unused $$API/json/$$(basename $$API).json --output=$$API/json/$$(basename $$API).json ; \ @@ -67,16 +67,16 @@ gen-api: ## Generate PMM API done # generate public API spec, omit agentlocalpb (always private), - # and managementpb/dbaas, managementpb/ia, managementpb/azure, managementpb/role and qanpb (not v1 yet) + # as well as a number of protos that are in beta (not v1 yet, they all go to a similar call below) bin/swagger 
mixin --output=api/swagger/swagger.json \ api/swagger/header.json \ api/serverpb/json/serverpb.json \ api/userpb/json/userpb.json \ api/inventorypb/json/inventorypb.json \ api/managementpb/json/managementpb.json \ - api/onboardingpb/json/onboardingpb.json \ - api/managementpb/backup/json/backup.json \ - api/managementpb/alerting/json/alerting.json + api/onboardingpb/json/onboardingpb.json \ + api/managementpb/backup/json/backup.json \ + api/managementpb/alerting/json/alerting.json bin/swagger validate api/swagger/swagger.json bin/swagger-order --output=api/swagger/swagger.json api/swagger/swagger.json @@ -95,6 +95,7 @@ gen-api: ## Generate PMM API api/managementpb/azure/json/azure.json \ api/managementpb/role/json/role.json \ api/managementpb/agent/json/agent.json \ + api/managementpb/node/json/node.json \ api/managementpb/service/json/service.json \ api/qanpb/json/qanpb.json \ api/onboardingpb/json/onboardingpb.json \ @@ -128,7 +129,7 @@ clean: clean_swagger ## Remove generated files find api -name '*.pb.gw.go' -print -delete find api -name '*.validate.go' -print -delete - for API in api/agentlocalpb api/serverpb api/inventorypb api/managementpb api/managementpb/dbaas api/managementpb/ia api/managementpb/alerting api/managementpb/backup api/managementpb/role api/managementpb/agent api/managementpb/service api/qanpb api/platformpb api/onboardingpb ; do \ + for API in api/agentlocalpb api/serverpb api/inventorypb api/managementpb api/managementpb/dbaas api/managementpb/ia api/managementpb/alerting api/managementpb/backup api/managementpb/role api/managementpb/agent api/managementpb/node api/managementpb/service api/qanpb api/platformpb api/onboardingpb ; do \ rm -fr $$API/json/client $$API/json/models $$API/json/$$(basename $$API).json ; \ done rm -f api/swagger/swagger.json api/swagger/swagger-dev.json @@ -147,7 +148,7 @@ check: ## Run required checkers and linters check-license: ## Run license header checks against source files bin/license-eye -c .licenserc.yaml 
header check -check-all: check-license check ## Run golang ci linter to check new changes from main +check-all: check-license check ## Run golangci linter to check for changes against main bin/golangci-lint run -c=.golangci.yml --new-from-rev=main FILES = $(shell find . -type f -name '*.go') diff --git a/README.md b/README.md index 8bfa76b770..5ceeac588e 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,6 @@ [![CLA assistant](https://cla-assistant.percona.com/readme/badge/percona/pmm)](https://cla-assistant.percona.com/percona/pmm) [![Code coverage](https://codecov.io/gh/percona/pmm/branch/main/graph/badge.svg)](https://codecov.io/gh/percona/pmm) [![Go Report Card](https://goreportcard.com/badge/github.com/percona/pmm)](https://goreportcard.com/report/github.com/percona/pmm) -[![Discord](https://img.shields.io/discord/808660945513611334?label=Discord&logo=Discord&style=flat)](https://per.co.na/discord) [![Forum](https://img.shields.io/badge/Forum-join-brightgreen)](https://forums.percona.com/) ![PMM](img/pmm-logo.png) @@ -76,7 +75,7 @@ We encourage contributions and are always looking for new members that are as de If you’re looking for information about how you can contribute, we have [contribution guidelines](CONTRIBUTING.md) across all our repositories in `CONTRIBUTING.md` files. Some of them may just link to the main project’s repository’s contribution guidelines. -We're looking forward to your contributions and hope to hear from you soon on our [Forums](https://forums.percona.com) and [Discord](https://per.co.na/discord). +We're looking forward to your contributions and hope to hear from you soon on our [Forums](https://forums.percona.com). 
## Submitting Bug Reports diff --git a/admin/agentlocal/agentlocal.go b/admin/agentlocal/agentlocal.go index b820568a9c..56e4fb3d5a 100644 --- a/admin/agentlocal/agentlocal.go +++ b/admin/agentlocal/agentlocal.go @@ -40,7 +40,7 @@ func SetTransport(ctx context.Context, debug bool, port uint32) { transport.Context = ctx // disable HTTP/2 - httpTransport := transport.Transport.(*http.Transport) + httpTransport := transport.Transport.(*http.Transport) //nolint:forcetypeassert httpTransport.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) client.Default.SetTransport(transport) diff --git a/admin/cli/cli.go b/admin/cli/cli.go index 83012c274f..36e5d4615f 100644 --- a/admin/cli/cli.go +++ b/admin/cli/cli.go @@ -130,7 +130,7 @@ func printResponse(opts *flags.GlobalFlags, res commands.Result, err error) erro logrus.Debugf("Result: %#v", res) logrus.Debugf("Error: %#v", err) - switch err := err.(type) { + switch err := err.(type) { //nolint:errorlint case nil: printSuccessResult(opts, res) os.Exit(0) diff --git a/admin/cmd/bootstrap.go b/admin/cmd/bootstrap.go index 06c1dec488..369a3a3fa4 100644 --- a/admin/cmd/bootstrap.go +++ b/admin/cmd/bootstrap.go @@ -181,7 +181,7 @@ func finishBootstrap(globalFlags *flags.GlobalFlags) { go func() { s := <-signals signal.Stop(signals) - logrus.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) + logrus.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert cancel() }() diff --git a/admin/commands/base/setup.go b/admin/commands/base/setup.go index d4ed9bdb49..46546105b8 100644 --- a/admin/commands/base/setup.go +++ b/admin/commands/base/setup.go @@ -53,6 +53,7 @@ var ( // SetupClients configures local and PMM Server API clients. 
func SetupClients(ctx context.Context, globalFlags *flags.GlobalFlags) { + //nolint:nestif if globalFlags.ServerURL == nil || globalFlags.ServerURL.String() == "" { status, err := agentlocal.GetStatus(agentlocal.DoNotRequestNetworkInfo) if err != nil { diff --git a/admin/commands/config.go b/admin/commands/config.go index 7d4f33ee2b..44cf7a0220 100644 --- a/admin/commands/config.go +++ b/admin/commands/config.go @@ -59,16 +59,21 @@ type ConfigCommand struct { LogLinesCount uint `help:"Take and return N most recent log lines in logs.zip for each: server, every configured exporters and agents" default:"1024"` } -func (cmd *ConfigCommand) args(globals *flags.GlobalFlags) (res []string, switchedToTLS bool) { +func (cmd *ConfigCommand) args(globals *flags.GlobalFlags) ([]string, bool) { port := globals.ServerURL.Port() if port == "" { port = "443" } + + var switchedToTLS bool + var res []string + if globals.ServerURL.Scheme == "http" { port = "443" switchedToTLS = true globals.SkipTLSCertificateCheck = true } + res = append(res, fmt.Sprintf("--server-address=%s:%s", globals.ServerURL.Hostname(), port)) if globals.ServerURL.User != nil { @@ -137,7 +142,7 @@ func (cmd *ConfigCommand) args(globals *flags.GlobalFlags) (res []string, switch res = append(res, cmd.NodeAddress, cmd.NodeType, cmd.NodeName) - return //nolint:nakedret + return res, switchedToTLS } // RunCmd runs config command. diff --git a/admin/commands/management/add.go b/admin/commands/management/add.go index 729101f916..59c623fe3f 100644 --- a/admin/commands/management/add.go +++ b/admin/commands/management/add.go @@ -60,19 +60,21 @@ type connectionGetter interface { // - addPostgreSQLCommand // - addMongoDBCommand // Returns service name, socket, host, port, error. 
-func processGlobalAddFlagsWithSocket(cmd connectionGetter, opts AddCommonFlags) (serviceName string, socket string, host string, port uint16, err error) { - serviceName = cmd.GetServiceName() +func processGlobalAddFlagsWithSocket(cmd connectionGetter, opts AddCommonFlags) (string, string, string, uint16, error) { + serviceName := cmd.GetServiceName() if opts.AddServiceNameFlag != "" { serviceName = opts.AddServiceNameFlag } - socket = cmd.GetSocket() + socket := cmd.GetSocket() address := cmd.GetAddress() if socket == "" && address == "" { address = cmd.GetDefaultAddress() } var portI int + var host string + var err error if address != "" { var portS string diff --git a/admin/commands/management/add_external_serverless.go b/admin/commands/management/add_external_serverless.go index 758d9186fa..f9893caa40 100644 --- a/admin/commands/management/add_external_serverless.go +++ b/admin/commands/management/add_external_serverless.go @@ -164,11 +164,11 @@ func (cmd *AddExternalServerlessCommand) RunCmd() (commands.Result, error) { }, nil } -func (cmd *AddExternalServerlessCommand) processURLFlags() (scheme, metricsPath, address string, port uint16, err error) { - scheme = cmd.Scheme - address = cmd.Host - port = cmd.ListenPort - metricsPath = cmd.MetricsPath +func (cmd *AddExternalServerlessCommand) processURLFlags() (string, string, string, uint16, error) { + scheme := cmd.Scheme + address := cmd.Host + port := cmd.ListenPort + metricsPath := cmd.MetricsPath switch { case cmd.URL != "": diff --git a/admin/commands/pmm/server/docker/deps.go b/admin/commands/pmm/server/docker/deps.go index b91d4a19b2..1c5eed963e 100644 --- a/admin/commands/pmm/server/docker/deps.go +++ b/admin/commands/pmm/server/docker/deps.go @@ -17,11 +17,11 @@ package docker import ( "context" "io" - "time" tea "github.com/charmbracelet/bubbletea" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/volume" 
"github.com/docker/docker/client" "github.com/percona/pmm/admin/pkg/docker" @@ -36,10 +36,10 @@ type Functions interface { ChangeServerPassword(ctx context.Context, containerID, newPassword string) error ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) - ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error + ContainerStop(ctx context.Context, containerID string, timeout *int) error ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) - CreateVolume(ctx context.Context, volumeName string, labels map[string]string) (*types.Volume, error) + ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) + CreateVolume(ctx context.Context, volumeName string, labels map[string]string) (*volume.Volume, error) FindServerContainers(ctx context.Context) ([]types.Container, error) GetDockerClient() *client.Client RunContainer(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, containerName string) (string, error) diff --git a/admin/commands/pmm/server/docker/mock_functions_test.go b/admin/commands/pmm/server/docker/mock_functions_test.go index 9f2ebe8351..bdb8413c80 100644 --- a/admin/commands/pmm/server/docker/mock_functions_test.go +++ b/admin/commands/pmm/server/docker/mock_functions_test.go @@ -5,11 +5,11 @@ package docker import ( context "context" io "io" - time "time" tea "github.com/charmbracelet/bubbletea" types "github.com/docker/docker/api/types" container "github.com/docker/docker/api/types/container" + volume "github.com/docker/docker/api/types/volume" client "github.com/docker/docker/client" mock "github.com/stretchr/testify/mock" @@ -57,11 +57,11 @@ func (_m 
*MockFunctions) ContainerInspect(ctx context.Context, containerID strin } // ContainerStop provides a mock function with given fields: ctx, containerID, timeout -func (_m *MockFunctions) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { +func (_m *MockFunctions) ContainerStop(ctx context.Context, containerID string, timeout *int) error { ret := _m.Called(ctx, containerID, timeout) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, *time.Duration) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, *int) error); ok { r0 = rf(ctx, containerID, timeout) } else { r0 = ret.Error(0) @@ -92,15 +92,15 @@ func (_m *MockFunctions) ContainerUpdate(ctx context.Context, containerID string } // ContainerWait provides a mock function with given fields: ctx, containerID, condition -func (_m *MockFunctions) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { +func (_m *MockFunctions) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { ret := _m.Called(ctx, containerID, condition) - var r0 <-chan container.ContainerWaitOKBody - if rf, ok := ret.Get(0).(func(context.Context, string, container.WaitCondition) <-chan container.ContainerWaitOKBody); ok { + var r0 <-chan container.WaitResponse + if rf, ok := ret.Get(0).(func(context.Context, string, container.WaitCondition) <-chan container.WaitResponse); ok { r0 = rf(ctx, containerID, condition) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan container.ContainerWaitOKBody) + r0 = ret.Get(0).(<-chan container.WaitResponse) } } @@ -117,15 +117,15 @@ func (_m *MockFunctions) ContainerWait(ctx context.Context, containerID string, } // CreateVolume provides a mock function with given fields: ctx, volumeName, labels -func (_m *MockFunctions) CreateVolume(ctx context.Context, volumeName 
string, labels map[string]string) (*types.Volume, error) { +func (_m *MockFunctions) CreateVolume(ctx context.Context, volumeName string, labels map[string]string) (*volume.Volume, error) { ret := _m.Called(ctx, volumeName, labels) - var r0 *types.Volume - if rf, ok := ret.Get(0).(func(context.Context, string, map[string]string) *types.Volume); ok { + var r0 *volume.Volume + if rf, ok := ret.Get(0).(func(context.Context, string, map[string]string) *volume.Volume); ok { r0 = rf(ctx, volumeName, labels) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Volume) + r0 = ret.Get(0).(*volume.Volume) } } diff --git a/admin/commands/pmm/server/docker/upgrade.go b/admin/commands/pmm/server/docker/upgrade.go index 7e3b2f95f8..62aa18b865 100644 --- a/admin/commands/pmm/server/docker/upgrade.go +++ b/admin/commands/pmm/server/docker/upgrade.go @@ -100,7 +100,7 @@ func (c *UpgradeCommand) RunCmdWithContext(ctx context.Context, globals *flags.G } logrus.Infof("Stopping PMM Server in container %q", currentContainer.Name) - noTimeout := -1 * time.Second + noTimeout := -1 if err = c.dockerFn.ContainerStop(ctx, currentContainer.ID, &noTimeout); err != nil { return nil, err } diff --git a/admin/pkg/docker/docker.go b/admin/pkg/docker/docker.go index a9a3950e2e..75921ef2a9 100644 --- a/admin/pkg/docker/docker.go +++ b/admin/pkg/docker/docker.go @@ -231,7 +231,7 @@ func (b *Base) RunContainer(ctx context.Context, config *container.Config, hostC var ErrVolumeExists = fmt.Errorf("VolumeExists") // CreateVolume first checks if the volume exists and creates it. -func (b *Base) CreateVolume(ctx context.Context, volumeName string, labels map[string]string) (*types.Volume, error) { +func (b *Base) CreateVolume(ctx context.Context, volumeName string, labels map[string]string) (*volume.Volume, error) { // We need to first manually check if the volume exists because // cli.VolumeCreate() does not complain if it already exists. 
v, err := b.Cli.VolumeList(ctx, filters.NewArgs(filters.Arg("name", volumeName))) @@ -252,7 +252,7 @@ func (b *Base) CreateVolume(ctx context.Context, volumeName string, labels map[s volumeLabels["percona.pmm"] = "server" - volume, err := b.Cli.VolumeCreate(ctx, volume.VolumeCreateBody{ //nolint:exhaustruct + volume, err := b.Cli.VolumeCreate(ctx, volume.CreateOptions{ //nolint:exhaustruct Name: volumeName, Labels: volumeLabels, }) @@ -269,8 +269,8 @@ func (b *Base) ContainerInspect(ctx context.Context, containerID string) (types. } // ContainerStop stops a container. -func (b *Base) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { - return b.Cli.ContainerStop(ctx, containerID, timeout) +func (b *Base) ContainerStop(ctx context.Context, containerID string, timeout *int) error { + return b.Cli.ContainerStop(ctx, containerID, container.StopOptions{Timeout: timeout}) } // ContainerUpdate updates container configuration. @@ -279,7 +279,7 @@ func (b *Base) ContainerUpdate(ctx context.Context, containerID string, updateCo } // ContainerWait waits until a container is in a specific state. 
-func (b *Base) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { +func (b *Base) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { return b.Cli.ContainerWait(ctx, containerID, condition) } diff --git a/agent/Makefile b/agent/Makefile index 4c657ecefa..a2334a7e7d 100644 --- a/agent/Makefile +++ b/agent/Makefile @@ -28,7 +28,7 @@ release: ## Build static pmm-agent release binary (Linux env CGO_ENABLED=1 go build -v -ldflags "-extldflags '-static' $(VERSION_FLAGS)" -tags 'osusergo netgo static_build' -o $(PMM_RELEASE_PATH)/pmm-agent go build -v -ldflags "-extldflags '-static' $(VERSION_FLAGS)" -tags 'osusergo netgo static_build' -o $(PMM_RELEASE_PATH)/pmm-agent-entrypoint ./cmd/pmm-agent-entrypoint $(PMM_RELEASE_PATH)/pmm-agent --version - -ldd $(PMM_RELEASE_PATH)/pmm-agent + ldd $(PMM_RELEASE_PATH)/pmm-agent 2>&1 | grep -Fq 'not a dynamic executable' release-dev: env CGO_ENABLED=1 go build -race -v -ldflags "$(VERSION_FLAGS)" -o $(PMM_RELEASE_PATH)/pmm-agent diff --git a/agent/agents/cache/cache.go b/agent/agents/cache/cache.go index 0de00b1e07..41c506981f 100644 --- a/agent/agents/cache/cache.go +++ b/agent/agents/cache/cache.go @@ -80,7 +80,7 @@ func (c *Cache) Get(dest interface{}) error { m := reflect.ValueOf(dest) for k, v := range c.items { - m.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v.Value.(*cacheItem).value)) + m.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(v.Value.(*cacheItem).value)) //nolint:forcetypeassert } return nil } @@ -97,10 +97,10 @@ func (c *Cache) Set(current interface{}) error { var wasTrimmed bool var next *list.Element - for e := c.itemsList.Front(); e != nil && now.Sub(e.Value.(*cacheItem).added) > c.retain; e = next { + for e := c.itemsList.Front(); e != nil && now.Sub(e.Value.(*cacheItem).added) > c.retain; e = next { //nolint:forcetypeassert 
c.removedN++ next = e.Next() - delete(c.items, c.itemsList.Remove(e).(*cacheItem).key) + delete(c.items, c.itemsList.Remove(e).(*cacheItem).key) //nolint:forcetypeassert } m := reflect.ValueOf(current) @@ -110,15 +110,15 @@ func (c *Cache) Set(current interface{}) error { value := iter.Value().Interface() if e, ok := c.items[key]; ok { c.updatedN++ - e.Value.(*cacheItem).added = now - e.Value.(*cacheItem).value = value + e.Value.(*cacheItem).added = now //nolint:forcetypeassert + e.Value.(*cacheItem).value = value //nolint:forcetypeassert c.itemsList.MoveToBack(e) } else { c.addedN++ c.items[key] = c.itemsList.PushBack(&cacheItem{key, value, now}) if uint(len(c.items)) > c.sizeLimit { - delete(c.items, c.itemsList.Remove(c.itemsList.Front()).(*cacheItem).key) + delete(c.items, c.itemsList.Remove(c.itemsList.Front()).(*cacheItem).key) //nolint:forcetypeassert c.removedN++ c.trimmedN++ wasTrimmed = true @@ -128,7 +128,7 @@ func (c *Cache) Set(current interface{}) error { if wasTrimmed { c.l.Debugf("Cache size exceeded the limit of %d items and the oldest values were trimmed. 
"+ "Now the oldest query in the cache is of time %s", - c.sizeLimit, c.itemsList.Front().Value.(*cacheItem).added.UTC().Format("2006-01-02T15:04:05Z")) + c.sizeLimit, c.itemsList.Front().Value.(*cacheItem).added.UTC().Format("2006-01-02T15:04:05Z")) //nolint:forcetypeassert } return nil } @@ -141,8 +141,8 @@ func (c *Cache) Stats() Stats { oldest := time.Unix(0, 0) newest := time.Unix(0, 0) if len(c.items) != 0 { - oldest = c.itemsList.Front().Value.(*cacheItem).added - newest = c.itemsList.Back().Value.(*cacheItem).added + oldest = c.itemsList.Front().Value.(*cacheItem).added //nolint:forcetypeassert + newest = c.itemsList.Back().Value.(*cacheItem).added //nolint:forcetypeassert } return Stats{ diff --git a/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go b/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go index b4951ad1a6..9a2bd129e1 100644 --- a/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go +++ b/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go @@ -277,10 +277,10 @@ func (a *Aggregator) createResult(ctx context.Context) *report.Result { } bucket.Common.MQueryTimeCnt = float32(v.Count) // TODO: Check is it right value - bucket.Common.MQueryTimeMax = float32(v.QueryTime.Max) - bucket.Common.MQueryTimeMin = float32(v.QueryTime.Min) - bucket.Common.MQueryTimeP99 = float32(v.QueryTime.Pct99) - bucket.Common.MQueryTimeSum = float32(v.QueryTime.Total) + bucket.Common.MQueryTimeMax = float32(v.QueryTime.Max) / 1000 + bucket.Common.MQueryTimeMin = float32(v.QueryTime.Min) / 1000 + bucket.Common.MQueryTimeP99 = float32(v.QueryTime.Pct99) / 1000 + bucket.Common.MQueryTimeSum = float32(v.QueryTime.Total) / 1000 bucket.Mongodb.MDocsReturnedCnt = float32(v.Count) // TODO: Check is it right value bucket.Mongodb.MDocsReturnedMax = float32(v.Returned.Max) diff --git a/agent/agents/mysql/perfschema/models.go b/agent/agents/mysql/perfschema/models.go index aeb536c007..6bfe62b19c 100644 --- 
a/agent/agents/mysql/perfschema/models.go +++ b/agent/agents/mysql/perfschema/models.go @@ -74,9 +74,9 @@ type eventsStatementsHistory struct { // TimerEnd *int64 `reform:"TIMER_END"` // TimerWait *int64 `reform:"TIMER_WAIT"` // LockTime int64 `reform:"LOCK_TIME"` - SQLText *string `reform:"SQL_TEXT"` - Digest *string `reform:"DIGEST"` - // DigestText *string `reform:"DIGEST_TEXT"` + SQLText *string `reform:"SQL_TEXT"` + Digest *string `reform:"DIGEST"` + DigestText *string `reform:"DIGEST_TEXT"` CurrentSchema *string `reform:"CURRENT_SCHEMA"` // ObjectType *string `reform:"OBJECT_TYPE"` // ObjectSchema *string `reform:"OBJECT_SCHEMA"` diff --git a/agent/agents/mysql/perfschema/models_reform.go b/agent/agents/mysql/perfschema/models_reform.go index 0a829a04cd..89748911bf 100644 --- a/agent/agents/mysql/perfschema/models_reform.go +++ b/agent/agents/mysql/perfschema/models_reform.go @@ -322,6 +322,7 @@ func (v *eventsStatementsHistoryViewType) Columns() []string { return []string{ "SQL_TEXT", "DIGEST", + "DIGEST_TEXT", "CURRENT_SCHEMA", } } @@ -340,6 +341,7 @@ var eventsStatementsHistoryView = &eventsStatementsHistoryViewType{ Fields: []parse.FieldInfo{ {Name: "SQLText", Type: "*string", Column: "SQL_TEXT"}, {Name: "Digest", Type: "*string", Column: "DIGEST"}, + {Name: "DigestText", Type: "*string", Column: "DIGEST_TEXT"}, {Name: "CurrentSchema", Type: "*string", Column: "CURRENT_SCHEMA"}, }, PKFieldIndex: -1, @@ -349,10 +351,11 @@ var eventsStatementsHistoryView = &eventsStatementsHistoryViewType{ // String returns a string representation of this struct or record. 
func (s eventsStatementsHistory) String() string { - res := make([]string, 3) + res := make([]string, 4) res[0] = "SQLText: " + reform.Inspect(s.SQLText, true) res[1] = "Digest: " + reform.Inspect(s.Digest, true) - res[2] = "CurrentSchema: " + reform.Inspect(s.CurrentSchema, true) + res[2] = "DigestText: " + reform.Inspect(s.DigestText, true) + res[3] = "CurrentSchema: " + reform.Inspect(s.CurrentSchema, true) return strings.Join(res, ", ") } @@ -362,6 +365,7 @@ func (s *eventsStatementsHistory) Values() []interface{} { return []interface{}{ s.SQLText, s.Digest, + s.DigestText, s.CurrentSchema, } } @@ -372,6 +376,7 @@ func (s *eventsStatementsHistory) Pointers() []interface{} { return []interface{}{ &s.SQLText, &s.Digest, + &s.DigestText, &s.CurrentSchema, } } diff --git a/agent/agents/mysql/perfschema/perfschema.go b/agent/agents/mysql/perfschema/perfschema.go index a41618ee64..1293219394 100644 --- a/agent/agents/mysql/perfschema/perfschema.go +++ b/agent/agents/mysql/perfschema/perfschema.go @@ -326,6 +326,7 @@ func (m *PerfSchema) getNewBuckets(periodStart time.Time, periodLengthSecs uint3 b.Common.PeriodStartUnixSecs = startS b.Common.PeriodLengthSecs = periodLengthSecs + //nolint:nestif if esh := history[b.Common.Queryid]; esh != nil { // TODO test if we really need that // If we don't need it, we can avoid polling events_statements_history completely @@ -335,17 +336,13 @@ func (m *PerfSchema) getNewBuckets(periodStart time.Time, periodLengthSecs uint3 } if esh.SQLText != nil { - explainFingerprint, placeholdersCount, err := queryparser.MySQL(*esh.SQLText) - if err != nil { - m.l.Debugf("cannot parse query: %s", *esh.SQLText) - } else { - explainFingerprint, truncated := truncate.Query(explainFingerprint, m.maxQueryLength) - if truncated { - b.Common.IsTruncated = truncated - } - b.Common.ExplainFingerprint = explainFingerprint - b.Common.PlaceholdersCount = placeholdersCount + explainFingerprint, placeholdersCount := 
queryparser.GetMySQLFingerprintPlaceholders(*esh.SQLText, *esh.DigestText) + explainFingerprint, truncated := truncate.Query(explainFingerprint, m.maxQueryLength) + if truncated { + b.Common.IsTruncated = truncated } + b.Common.ExplainFingerprint = explainFingerprint + b.Common.PlaceholdersCount = placeholdersCount if !m.disableQueryExamples { example, truncated := truncate.Query(*esh.SQLText, m.maxQueryLength) diff --git a/agent/agents/mysql/perfschema/perfschema_test.go b/agent/agents/mysql/perfschema/perfschema_test.go index 36ff954907..5510c29e7f 100644 --- a/agent/agents/mysql/perfschema/perfschema_test.go +++ b/agent/agents/mysql/perfschema/perfschema_test.go @@ -304,8 +304,8 @@ func TestPerfSchema(t *testing.T) { case "10.4-mariadb": digests = map[string]string{ - "SELECT `sleep` (?)": "53be0f409af1ccb13906186e1173d977", - "SELECT * FROM `city`": "0d4348c89b36f2739b082c2aef07b3d4", + "SELECT `sleep` (?)": "ce5b40e78030bb319c84965637255c18", + "SELECT * FROM `city`": "978a3813c9f566d7a72d65b88a9149d9", } default: @@ -336,7 +336,7 @@ func TestPerfSchema(t *testing.T) { assert.InDelta(t, 0.1, actual.Common.MQueryTimeSum, 0.09) expected := &agentpb.MetricsBucket{ Common: &agentpb.MetricsBucket_Common{ - ExplainFingerprint: "select /* Sleep */ sleep(:1) from dual", + ExplainFingerprint: "SELECT `sleep` (:1)", PlaceholdersCount: 1, Fingerprint: "SELECT `sleep` (?)", Schema: "world", @@ -382,7 +382,7 @@ func TestPerfSchema(t *testing.T) { assert.InDelta(t, 0, actual.Mysql.MLockTimeSum, 0.09) expected := &agentpb.MetricsBucket{ Common: &agentpb.MetricsBucket_Common{ - ExplainFingerprint: "select /* AllCities */ * from city", + ExplainFingerprint: "SELECT * FROM `city`", Fingerprint: "SELECT * FROM `city`", Schema: "world", AgentId: "agent_id", @@ -453,6 +453,8 @@ func TestPerfSchema(t *testing.T) { assert.InDelta(t, 0, actual.Mysql.MLockTimeSum, 0.09) expected := &agentpb.MetricsBucket{ Common: &agentpb.MetricsBucket_Common{ + ExplainFingerprint: "SELECT * FROM `t1` 
WHERE `col1` = :1", + PlaceholdersCount: 1, Fingerprint: "SELECT * FROM `t1` WHERE `col1` = ?", Schema: "world", AgentId: "agent_id", diff --git a/agent/agents/mysql/slowlog/slowlog.go b/agent/agents/mysql/slowlog/slowlog.go index 2570ebee7f..381db27287 100644 --- a/agent/agents/mysql/slowlog/slowlog.go +++ b/agent/agents/mysql/slowlog/slowlog.go @@ -165,11 +165,13 @@ func (s *SlowLog) Run(ctx context.Context) { } // recheck returns new slowlog information, and rotates slowlog file if needed. -func (s *SlowLog) recheck(ctx context.Context) (newInfo *slowLogInfo) { +func (s *SlowLog) recheck(ctx context.Context) *slowLogInfo { + var newInfo *slowLogInfo + db, err := sql.Open("mysql", s.params.DSN) if err != nil { s.l.Errorf("Cannot open database connection: %s", err) - return + return nil } defer db.Close() //nolint:errcheck @@ -177,17 +179,17 @@ func (s *SlowLog) recheck(ctx context.Context) (newInfo *slowLogInfo) { row := db.QueryRowContext(ctx, "SHOW GRANTS") if err := row.Scan(&grants); err != nil { s.l.Errorf("Cannot scan db user privileges: %s", err) - return + return nil } if !strings.Contains(grants, "RELOAD") && !strings.Contains(grants, "ALL PRIVILEGES") { s.l.Error("RELOAD grant not enabled, cannot rotate slowlog") - return + return nil } if newInfo, err = s.getSlowLogInfo(ctx); err != nil { s.l.Error(err) - return + return nil } if s.params.SlowLogFilePrefix != "" { newInfo.path = filepath.Join(s.params.SlowLogFilePrefix, newInfo.path) @@ -195,13 +197,13 @@ func (s *SlowLog) recheck(ctx context.Context) (newInfo *slowLogInfo) { maxSize := s.params.MaxSlowlogFileSize if maxSize <= 0 { - return + return newInfo } fi, err := os.Stat(newInfo.path) if err != nil { s.l.Errorf("Failed to stat file: %s", err) - return + return newInfo } if size := fi.Size(); size > maxSize { s.l.Infof("Rotating slowlog file: %d > %d.", size, maxSize) @@ -209,7 +211,8 @@ func (s *SlowLog) recheck(ctx context.Context) (newInfo *slowLogInfo) { s.l.Error(err) } } - return + + return 
newInfo } // getSlowLogInfo returns information about slowlog settings. @@ -372,7 +375,7 @@ func (s *SlowLog) processFile(ctx context.Context, file string, outlierTime floa case <-t.C: lengthS := uint32(math.Round(wait.Seconds())) // round 59.9s/60.1s to 60s res := aggregator.Finalize() - buckets := makeBuckets(s.params.AgentID, res, start, lengthS, s.params.DisableQueryExamples, s.params.MaxQueryLength, s.l) + buckets := makeBuckets(s.params.AgentID, res, start, lengthS, s.params.DisableQueryExamples, s.params.MaxQueryLength) s.l.Debugf("Made %d buckets out of %d classes in %s+%d interval. Wait time: %s.", len(buckets), len(res.Class), start.Format("15:04:05"), lengthS, time.Since(start)) @@ -397,7 +400,6 @@ func makeBuckets( periodLengthSecs uint32, disableQueryExamples bool, maxQueryLength int32, - l *logrus.Entry, ) []*agentpb.MetricsBucket { buckets := make([]*agentpb.MetricsBucket, 0, len(res.Class)) @@ -434,17 +436,13 @@ func makeBuckets( } if q != "" { - explainFingerprint, placeholdersCount, err := queryparser.MySQL(v.Example.Query) - if err != nil { - l.Debugf("cannot parse query: %s", v.Example.Query) - } else { - explainFingerprint, truncated := truncate.Query(explainFingerprint, maxQueryLength) - if truncated { - mb.Common.IsTruncated = truncated - } - mb.Common.ExplainFingerprint = explainFingerprint - mb.Common.PlaceholdersCount = placeholdersCount + explainFingerprint, placeholdersCount := queryparser.GetMySQLFingerprintPlaceholders(q, fingerprint) + explainFingerprint, truncated := truncate.Query(explainFingerprint, maxQueryLength) + if truncated { + mb.Common.IsTruncated = truncated } + mb.Common.ExplainFingerprint = explainFingerprint + mb.Common.PlaceholdersCount = placeholdersCount } if v.Example != nil { diff --git a/agent/agents/mysql/slowlog/slowlog_expected.json b/agent/agents/mysql/slowlog/slowlog_expected.json index b7efa1fbbe..1a468d01ff 100644 --- a/agent/agents/mysql/slowlog/slowlog_expected.json +++ 
b/agent/agents/mysql/slowlog/slowlog_expected.json @@ -2,7 +2,7 @@ { "common": { "queryid": "C684EA1D78348D23", - "explain_fingerprint": "select `name`, subsystem, type, `comment`, `count` from information_schema.innodb_metrics where `status` = :1", + "explain_fingerprint": "select name, subsystem, type, comment, count from information_schema.innodb_metrics where status = :1", "placeholders_count": 1, "fingerprint": "select name, subsystem, type, comment, count from information_schema.innodb_metrics where status = ?", "username": "root", @@ -46,7 +46,7 @@ { "common": { "queryid": "FA521F3C42DC5272", - "explain_fingerprint": "select pad from sbtest1 where id = :1", + "explain_fingerprint": "select pad from sbtest1 where id=:1", "placeholders_count": 1, "fingerprint": "select pad from sbtest1 where id=?", "schema": "sbtest", @@ -91,6 +91,7 @@ { "common": { "queryid": "1D410B4BE5060972", + "explain_fingerprint": "ping", "fingerprint": "ping", "username": "root", "agent_id": "/agent_id/73ee2f92-d5aa-45f0-8b09-6d3df605fd44", @@ -164,7 +165,8 @@ { "common": { "queryid": "328AAB9660C2879E", - "explain_fingerprint": "set @@lock_wait_timeout = 2", + "explain_fingerprint": "set lock_wait_timeout=:1", + "placeholders_count": 1, "fingerprint": "set lock_wait_timeout=?", "username": "root", "agent_id": "/agent_id/73ee2f92-d5aa-45f0-8b09-6d3df605fd44", @@ -195,7 +197,7 @@ { "common": { "queryid": "A212AD93263CF26F", - "explain_fingerprint": "select @@version from dual", + "explain_fingerprint": "select @@version", "fingerprint": "select @@version", "username": "root", "agent_id": "/agent_id/73ee2f92-d5aa-45f0-8b09-6d3df605fd44", diff --git a/agent/agents/mysql/slowlog/slowlog_test.go b/agent/agents/mysql/slowlog/slowlog_test.go index 08ff8301ed..6e0b338ded 100644 --- a/agent/agents/mysql/slowlog/slowlog_test.go +++ b/agent/agents/mysql/slowlog/slowlog_test.go @@ -62,12 +62,12 @@ func TestSlowLogMakeBucketsInvalidUTF8(t *testing.T) { }, } - actualBuckets := makeBuckets(agentID, 
parsingResult, periodStart, 60, false, truncate.GetDefaultMaxQueryLength(), logrus.NewEntry(logrus.New())) + actualBuckets := makeBuckets(agentID, parsingResult, periodStart, 60, false, truncate.GetDefaultMaxQueryLength()) expectedBuckets := []*agentpb.MetricsBucket{ { Common: &agentpb.MetricsBucket_Common{ Fingerprint: "select * from contacts t0 where t0.person_id = ?", - ExplainFingerprint: "select * from contacts as t0 where t0.person_id = :1", + ExplainFingerprint: "select * from contacts t0 where t0.person_id = :1", PlaceholdersCount: 1, AgentId: agentID, AgentType: inventorypb.AgentType_QAN_MYSQL_SLOWLOG_AGENT, @@ -94,7 +94,7 @@ func TestSlowLogMakeBuckets(t *testing.T) { parsingResult := event.Result{} getDataFromFile(t, "slowlog_fixture.json", &parsingResult) - actualBuckets := makeBuckets(agentID, parsingResult, periodStart, 60, false, truncate.GetDefaultMaxQueryLength(), logrus.NewEntry(logrus.New())) + actualBuckets := makeBuckets(agentID, parsingResult, periodStart, 60, false, truncate.GetDefaultMaxQueryLength()) var expectedBuckets []*agentpb.MetricsBucket getDataFromFile(t, "slowlog_expected.json", &expectedBuckets) diff --git a/agent/agents/postgres/parser/parser.go b/agent/agents/postgres/parser/parser.go index 55d23bc2ea..a0d67220cc 100644 --- a/agent/agents/postgres/parser/parser.go +++ b/agent/agents/postgres/parser/parser.go @@ -28,7 +28,10 @@ import ( var extractTablesRecover = true // ExtractTables extracts table names from query. 
-func ExtractTables(query string) (tables []string, err error) { +func ExtractTables(query string) ([]string, error) { + var err error + var tables []string //nolint:prealloc + if extractTablesRecover { defer func() { if r := recover(); r != nil { @@ -41,21 +44,21 @@ func ExtractTables(query string) (tables []string, err error) { var jsonTree string if jsonTree, err = pgquery.ParseToJSON(query); err != nil { err = errors.Wrap(err, "error on parsing sql query") - return + return nil, err } var res []string tableNames := make(map[string]struct{}) res, err = extract(jsonTree, `"relname":"`, `"`) if err != nil { - return + return nil, err } for _, v := range res { tableNames[v] = struct{}{} } res, err = extract(jsonTree, `"ctename":"`, `"`) if err != nil { - return + return nil, err } for _, v := range res { delete(tableNames, v) @@ -66,7 +69,7 @@ func ExtractTables(query string) (tables []string, err error) { } sort.Strings(tables) - return + return tables, nil } func extract(query, pre, post string) ([]string, error) { diff --git a/agent/agents/postgres/pgstatmonitor/models.go b/agent/agents/postgres/pgstatmonitor/models.go index 118d756d9d..218fb368ec 100644 --- a/agent/agents/postgres/pgstatmonitor/models.go +++ b/agent/agents/postgres/pgstatmonitor/models.go @@ -50,14 +50,6 @@ type pgStatMonitorSettingsTextValue struct { Value string `reform:"value"` } -// pgStatMonitorSettingsTextValue represents a row in pg_stat_monitor_settings view 2.0.0-dev and higher. -// -//reform:pg_stat_monitor_settings -type pgStatMonitorSettingsTextValue20 struct { - Name string `reform:"name"` - Setting string `reform:"setting"` -} - // pgStatMonitorExtended contains pgStatMonitor data and extends it with database, username and tables data. // It's made for performance reason. 
type pgStatMonitorExtended struct { diff --git a/agent/agents/postgres/pgstatmonitor/models_reform.go b/agent/agents/postgres/pgstatmonitor/models_reform.go index ed13e087a0..38344853ad 100644 --- a/agent/agents/postgres/pgstatmonitor/models_reform.go +++ b/agent/agents/postgres/pgstatmonitor/models_reform.go @@ -332,90 +332,9 @@ var ( _ fmt.Stringer = (*pgStatMonitorSettingsTextValue)(nil) ) -type pgStatMonitorSettingsTextValue20ViewType struct { - s parse.StructInfo - z []interface{} -} - -// Schema returns a schema name in SQL database (""). -func (v *pgStatMonitorSettingsTextValue20ViewType) Schema() string { - return v.s.SQLSchema -} - -// Name returns a view or table name in SQL database ("pg_stat_monitor_settings"). -func (v *pgStatMonitorSettingsTextValue20ViewType) Name() string { - return v.s.SQLName -} - -// Columns returns a new slice of column names for that view or table in SQL database. -func (v *pgStatMonitorSettingsTextValue20ViewType) Columns() []string { - return []string{ - "name", - "setting", - } -} - -// NewStruct makes a new struct for that view or table. -func (v *pgStatMonitorSettingsTextValue20ViewType) NewStruct() reform.Struct { - return new(pgStatMonitorSettingsTextValue20) -} - -// pgStatMonitorSettingsTextValue20View represents pg_stat_monitor_settings view or table in SQL database. -var pgStatMonitorSettingsTextValue20View = &pgStatMonitorSettingsTextValue20ViewType{ - s: parse.StructInfo{ - Type: "pgStatMonitorSettingsTextValue20", - SQLName: "pg_stat_monitor_settings", - Fields: []parse.FieldInfo{ - {Name: "Name", Type: "string", Column: "name"}, - {Name: "Setting", Type: "string", Column: "setting"}, - }, - PKFieldIndex: -1, - }, - z: new(pgStatMonitorSettingsTextValue20).Values(), -} - -// String returns a string representation of this struct or record. 
-func (s pgStatMonitorSettingsTextValue20) String() string { - res := make([]string, 2) - res[0] = "Name: " + reform.Inspect(s.Name, true) - res[1] = "Setting: " + reform.Inspect(s.Setting, true) - return strings.Join(res, ", ") -} - -// Values returns a slice of struct or record field values. -// Returned interface{} values are never untyped nils. -func (s *pgStatMonitorSettingsTextValue20) Values() []interface{} { - return []interface{}{ - s.Name, - s.Setting, - } -} - -// Pointers returns a slice of pointers to struct or record fields. -// Returned interface{} values are never untyped nils. -func (s *pgStatMonitorSettingsTextValue20) Pointers() []interface{} { - return []interface{}{ - &s.Name, - &s.Setting, - } -} - -// View returns View object for that struct. -func (s *pgStatMonitorSettingsTextValue20) View() reform.View { - return pgStatMonitorSettingsTextValue20View -} - -// check interfaces -var ( - _ reform.View = pgStatMonitorSettingsTextValue20View - _ reform.Struct = (*pgStatMonitorSettingsTextValue20)(nil) - _ fmt.Stringer = (*pgStatMonitorSettingsTextValue20)(nil) -) - func init() { parse.AssertUpToDate(&pgStatDatabaseView.s, new(pgStatDatabase)) parse.AssertUpToDate(&pgUserView.s, new(pgUser)) parse.AssertUpToDate(&pgStatMonitorSettingsView.s, new(pgStatMonitorSettings)) parse.AssertUpToDate(&pgStatMonitorSettingsTextValueView.s, new(pgStatMonitorSettingsTextValue)) - parse.AssertUpToDate(&pgStatMonitorSettingsTextValue20View.s, new(pgStatMonitorSettingsTextValue20)) } diff --git a/agent/agents/postgres/pgstatmonitor/pgstatmonitor.go b/agent/agents/postgres/pgstatmonitor/pgstatmonitor.go index 1d70f24bdb..2b9fce1110 100644 --- a/agent/agents/postgres/pgstatmonitor/pgstatmonitor.go +++ b/agent/agents/postgres/pgstatmonitor/pgstatmonitor.go @@ -21,7 +21,6 @@ import ( "fmt" "io" "strconv" - "strings" "time" "github.com/AlekSi/pointer" @@ -86,8 +85,8 @@ const ( ) const ( - queryTag = "pmm-agent:pgstatmonitor" - pgsm20SettingsViewQuery = "CREATE VIEW 
pg_stat_monitor_settings AS SELECT * FROM pg_settings WHERE name like 'pg_stat_monitor.%';" + queryTag = "pmm-agent:pgstatmonitor" + pgsm20SettingsQuery = "SELECT name, setting FROM pg_settings WHERE name like 'pg_stat_monitor.%'" // There is a feature in the FE that shows "n/a" for empty responses for dimensions. commandTextNotAvailable = "" commandTypeSelect = "SELECT" @@ -150,11 +149,11 @@ func newPgStatMonitorQAN(q *reform.Querier, dbCloser io.Closer, agentID string, }, nil } -func getPGVersion(q *reform.Querier) (vPG pgVersion, err error) { +func getPGVersion(q *reform.Querier) (pgVersion, error) { var v string - err = q.QueryRow(fmt.Sprintf("SELECT /* %s */ version()", queryTag)).Scan(&v) + err := q.QueryRow(fmt.Sprintf("SELECT /* %s */ version()", queryTag)).Scan(&v) if err != nil { - return + return pgVersion(0), err } v = version.ParsePostgreSQLVersion(v) @@ -350,7 +349,43 @@ func (m *PGStatMonitorQAN) checkDefaultWaitTime(waitTime time.Duration) bool { return true } -type settings map[string]*pgStatMonitorSettingsTextValue +type ( + settings map[string]*pgStatMonitorSettingsTextValue + pgsm20Settings struct { + Name string + Setting string + } +) + +func getPGSM20Settings(q *reform.Querier) (settings, error) { + rows, err := q.Query(pgsm20SettingsQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + result := make(settings) + for rows.Next() { + var setting pgsm20Settings + err = rows.Scan( + &setting.Name, + &setting.Setting) + if err != nil { + return nil, err + } + + result[setting.Name] = &pgStatMonitorSettingsTextValue{ + Name: setting.Name, + Value: setting.Setting, + } + } + err = rows.Err() + if err != nil { + return nil, err + } + + return result, nil +} func (m *PGStatMonitorQAN) getSettings() (settings, error) { settingsValuesAreText, err := areSettingsTextValues(m.q) @@ -363,51 +398,39 @@ func (m *PGStatMonitorQAN) getSettings() (settings, error) { return nil, err } - var settingsRows []reform.Struct - if 
settingsValuesAreText { + result := make(settings) + if settingsValuesAreText { //nolint:nestif if pgsmVersion >= pgStatMonitorVersion20PG12 { - // In case of PGSM 2.0 and above we need create view first. - _, errSettings := m.q.Exec(pgsm20SettingsViewQuery) - // If it is already existing we just igroning error. - if errSettings != nil && !strings.Contains(errSettings.Error(), "already exists") { - return nil, errSettings + result, err = getPGSM20Settings(m.q) + if err != nil { + return nil, err } - - settingsRows, err = m.q.SelectAllFrom(pgStatMonitorSettingsTextValue20View, "") } else { - settingsRows, err = m.q.SelectAllFrom(pgStatMonitorSettingsTextValueView, "") + settingsRows, err := m.q.SelectAllFrom(pgStatMonitorSettingsTextValueView, "") + if err != nil { + return nil, err + } + for _, row := range settingsRows { + setting := row.(*pgStatMonitorSettingsTextValue) //nolint:forcetypeassert + result[setting.Name] = setting + } } } else { - settingsRows, err = m.q.SelectAllFrom(pgStatMonitorSettingsView, "") - } - if err != nil { - return nil, errors.Wrap(err, "failed to get settings") - } - - settings := make(settings) - for _, row := range settingsRows { - if settingsValuesAreText { - if pgsmVersion >= pgStatMonitorVersion20PG12 { - setting := row.(*pgStatMonitorSettingsTextValue20) - settings[setting.Name] = &pgStatMonitorSettingsTextValue{ - Name: setting.Name, - Value: setting.Setting, - } - } else { - setting := row.(*pgStatMonitorSettingsTextValue) - settings[setting.Name] = setting - } - } else { - setting := row.(*pgStatMonitorSettings) + settingsRows, err := m.q.SelectAllFrom(pgStatMonitorSettingsView, "") + if err != nil { + return nil, err + } + for _, row := range settingsRows { + setting := row.(*pgStatMonitorSettings) //nolint:forcetypeassert name := setting.Name - settings[name] = &pgStatMonitorSettingsTextValue{ + result[name] = &pgStatMonitorSettingsTextValue{ Name: name, Value: fmt.Sprintf("%d", setting.Value), } } } - return settings, nil + 
return result, nil } func (s settings) getNormalizedQueryValue() (bool, error) { diff --git a/agent/agents/postgres/pgstatmonitor/stat_monitor_cache.go b/agent/agents/postgres/pgstatmonitor/stat_monitor_cache.go index a18b9880d5..0b3801cd9d 100644 --- a/agent/agents/postgres/pgstatmonitor/stat_monitor_cache.go +++ b/agent/agents/postgres/pgstatmonitor/stat_monitor_cache.go @@ -62,7 +62,12 @@ func newStatMonitorCache(l *logrus.Entry) *statMonitorCache { // getStatMonitorExtended returns the current state of pg_stat_monitor table with extended information (database, username) // and the previous cashed state grouped by bucket start time. -func (ssc *statMonitorCache) getStatMonitorExtended(ctx context.Context, q *reform.Querier, normalizedQuery bool, maxQueryLength int32) (current, cache map[time.Time]map[string]*pgStatMonitorExtended, err error) { //nolint:lll +func (ssc *statMonitorCache) getStatMonitorExtended( + ctx context.Context, + q *reform.Querier, + normalizedQuery bool, + maxQueryLength int32, +) (map[time.Time]map[string]*pgStatMonitorExtended, map[time.Time]map[string]*pgStatMonitorExtended, error) { var totalN, newN, newSharedN, oldN int start := time.Now() defer func() { @@ -71,8 +76,8 @@ func (ssc *statMonitorCache) getStatMonitorExtended(ctx context.Context, q *refo }() ssc.rw.RLock() - current = make(map[time.Time]map[string]*pgStatMonitorExtended) - cache = make(map[time.Time]map[string]*pgStatMonitorExtended) + current := make(map[time.Time]map[string]*pgStatMonitorExtended) + cache := make(map[time.Time]map[string]*pgStatMonitorExtended) for k, v := range ssc.items { cache[k] = v } @@ -85,33 +90,35 @@ func (ssc *statMonitorCache) getStatMonitorExtended(ctx context.Context, q *refo vPG, err := getPGVersion(q) if err != nil { err = errors.Wrap(err, "failed to get PG version") - return + return current, cache, err } ssc.l.Infof("pg version = %f", vPG) vPGSM, _, err := getPGMonitorVersion(q) if err != nil { err = errors.Wrap(err, "failed to get row 
and view for pg_stat_monitor version") - return + return current, cache, err } ssc.l.Infof("pg monitor version = %d", vPGSM) row, view := newPgStatMonitorStructs(vPGSM, vPG) conditions := "WHERE queryid IS NOT NULL AND query IS NOT NULL" if vPGSM >= pgStatMonitorVersion09 && vPGSM < pgStatMonitorVersion20PG12 { - // only pg_stat_monitor from 0.9.0 until 2.0.0 supports state_code. It tells what is the query's current state. + // Only pg_stat_monitor from 0.9.0 until 2.0.0 supports state_code. It tells what is the query's current state. // To have correct data in QAN, we have to get only queries that are either 'FINISHED' or 'FINISHED WITH ERROR'. conditions += " AND (state_code = 3 OR state_code = 4)" ssc.l.Debug("PGSM version with state and state_code") } if vPGSM >= pgStatMonitorVersion20PG12 { - // since version above we should scrape only buckets where bucket_done = true - conditions += " AND bucket_done" + // Since version above we should scrape only buckets where bucket_done = true + // and they have valid pgsm_query_id. Anyway empty pgsm_query_id means problem + // on PGSM side (query with error, bad setup etc). 
+ conditions += " AND bucket_done AND pgsm_query_id IS NOT NULL" } rows, e := q.SelectRows(view, conditions) if e != nil { err = errors.Wrap(e, "failed to query pg_stat_monitor") - return + return current, cache, err } defer rows.Close() //nolint:errcheck @@ -154,6 +161,7 @@ func (ssc *statMonitorCache) getStatMonitorExtended(ctx context.Context, q *refo } } + //nolint:nestif if c.Fingerprint == "" { newN++ fingerprint := c.Query @@ -251,7 +259,7 @@ func queryDatabases(q *reform.Querier) map[int64]string { res := make(map[int64]string, len(structs)) for _, str := range structs { - d := str.(*pgStatDatabase) + d := str.(*pgStatDatabase) //nolint:forcetypeassert res[d.DatID] = pointer.GetString(d.DatName) } return res @@ -265,7 +273,7 @@ func queryUsernames(q *reform.Querier) map[int64]string { res := make(map[int64]string, len(structs)) for _, str := range structs { - u := str.(*pgUser) + u := str.(*pgUser) //nolint:forcetypeassert res[u.UserID] = pointer.GetString(u.UserName) } return res diff --git a/agent/agents/postgres/pgstatstatements/pgstatstatements.go b/agent/agents/postgres/pgstatstatements/pgstatstatements.go index 61ffe77993..b60b32257f 100644 --- a/agent/agents/postgres/pgstatstatements/pgstatstatements.go +++ b/agent/agents/postgres/pgstatstatements/pgstatstatements.go @@ -105,11 +105,12 @@ func newPgStatStatementsQAN(q *reform.Querier, dbCloser io.Closer, agentID strin }, nil } -func getPgStatVersion(q *reform.Querier) (pgVersion semver.Version, err error) { +func getPgStatVersion(q *reform.Querier) (semver.Version, error) { var v string - err = q.QueryRow(fmt.Sprintf("SELECT /* %s */ extVersion FROM pg_extension WHERE pg_extension.extname = 'pg_stat_statements'", queryTag)).Scan(&v) + var pgVersion semver.Version + err := q.QueryRow(fmt.Sprintf("SELECT /* %s */ extVersion FROM pg_extension WHERE pg_extension.extname = 'pg_stat_statements'", queryTag)).Scan(&v) if err != nil { - return + return pgVersion, err } switch strings.Count(v, ".") { @@ 
-209,18 +210,23 @@ func (m *PGStatStatementsQAN) Run(ctx context.Context) { // getStatStatementsExtended returns the current state of pg_stat_statements table with extended information (database, username, tables) // and the previous cashed state. -func (m *PGStatStatementsQAN) getStatStatementsExtended(ctx context.Context, q *reform.Querier, maxQueryLength int32) (current, prev statementsMap, err error) { +func (m *PGStatStatementsQAN) getStatStatementsExtended( + ctx context.Context, + q *reform.Querier, + maxQueryLength int32, +) (statementsMap, statementsMap, error) { var totalN, newN, newSharedN, oldN int + var err error start := time.Now() defer func() { dur := time.Since(start) m.l.Debugf("Selected %d rows from pg_stat_statements in %s: %d new (%d shared tables), %d old.", totalN, dur, newN, newSharedN, oldN) }() - current = make(statementsMap, m.statementsCache.cache.Len()) - prev = make(statementsMap, m.statementsCache.cache.Len()) - if err = m.statementsCache.Get(prev); err != nil { - return + current := make(statementsMap, m.statementsCache.cache.Len()) + prev := make(statementsMap, m.statementsCache.cache.Len()) + if err := m.statementsCache.Get(prev); err != nil { + return nil, nil, err } // load all databases and usernames first as we can't use querier while iterating over rows below @@ -230,7 +236,7 @@ func (m *PGStatStatementsQAN) getStatStatementsExtended(ctx context.Context, q * rows, e := rowsByVersion(q, "WHERE queryid IS NOT NULL AND query IS NOT NULL") if e != nil { err = e - return + return nil, nil, err } defer rows.Close() //nolint:errcheck diff --git a/agent/agents/postgres/pgstatstatements/utils.go b/agent/agents/postgres/pgstatstatements/utils.go index 06e1d461f6..d1c62f5414 100644 --- a/agent/agents/postgres/pgstatstatements/utils.go +++ b/agent/agents/postgres/pgstatstatements/utils.go @@ -52,7 +52,7 @@ func queryDatabases(q *reform.Querier) map[int64]string { res := make(map[int64]string, len(structs)) for _, str := range structs { - 
d := str.(*pgStatDatabase) + d := str.(*pgStatDatabase) //nolint:forcetypeassert res[d.DatID] = pointer.GetString(d.DatName) } return res @@ -66,7 +66,7 @@ func queryUsernames(q *reform.Querier) map[int64]string { res := make(map[int64]string, len(structs)) for _, str := range structs { - u := str.(*pgUser) + u := str.(*pgUser) //nolint:forcetypeassert res[u.UserID] = pointer.GetString(u.UserName) } return res diff --git a/agent/agents/process/process_logger.go b/agent/agents/process/process_logger.go index cb718c2ab2..5c986612de 100644 --- a/agent/agents/process/process_logger.go +++ b/agent/agents/process/process_logger.go @@ -55,7 +55,7 @@ func newProcessLogger(l *logrus.Entry, lines int, redactWords []string) *process // Write implements io.Writer. // This method is thread-safe. -func (pl *processLogger) Write(p []byte) (n int, err error) { +func (pl *processLogger) Write(p []byte) (n int, err error) { //nolint:nonamedreturns pl.m.Lock() defer pl.m.Unlock() @@ -132,7 +132,7 @@ func replacer(redactWords []string) *strings.Replacer { var extractLogLevelRegex = regexp.MustCompile(`level=(\w+)`) -func extractLogLevel(line string) (level logrus.Level, found bool, err error) { +func extractLogLevel(line string) (logrus.Level, bool, error) { matches := extractLogLevelRegex.FindStringSubmatch(line) noMatches := len(matches) < 2 @@ -140,7 +140,7 @@ func extractLogLevel(line string) (level logrus.Level, found bool, err error) { return 0, false, nil } - level, err = logrus.ParseLevel(matches[1]) + level, err := logrus.ParseLevel(matches[1]) if err != nil { return 0, false, err } diff --git a/agent/agents/supervisor/supervisor.go b/agent/agents/supervisor/supervisor.go index b6a5214a3e..76909015c6 100644 --- a/agent/agents/supervisor/supervisor.go +++ b/agent/agents/supervisor/supervisor.go @@ -384,7 +384,11 @@ func (s *Supervisor) setBuiltinAgents(builtinAgents map[string]*agentpb.SetState // filter extracts IDs of the Agents that should be started, restarted with new 
parameters, or stopped, // and filters out IDs of the Agents that should not be changed. -func filter(existing, ap map[string]agentpb.AgentParams) (toStart, toRestart, toStop []string) { +func filter(existing, ap map[string]agentpb.AgentParams) ([]string, []string, []string) { + toStart := make([]string, 0, len(ap)) + toRestart := make([]string, 0, len(ap)) + toStop := make([]string, 0, len(existing)) + // existing agents not present in the new requested state should be stopped for existingID := range existing { if ap[existingID] == nil { @@ -411,7 +415,8 @@ func filter(existing, ap map[string]agentpb.AgentParams) (toStart, toRestart, to sort.Strings(toStop) sort.Strings(toRestart) sort.Strings(toStart) - return + + return toStart, toRestart, toStop } //nolint:golint,stylecheck @@ -462,6 +467,7 @@ func (s *Supervisor) startProcess(agentID string, agentProcess *agentpb.SetState close(done) }() + //nolint:forcetypeassert s.agentProcesses[agentID] = &agentProcessInfo{ cancel: cancel, done: done, @@ -590,6 +596,7 @@ func (s *Supervisor) startBuiltin(agentID string, builtinAgent *agentpb.SetState close(done) }() + //nolint:forcetypeassert s.builtinAgents[agentID] = &builtinAgentInfo{ cancel: cancel, done: done, diff --git a/agent/client/client.go b/agent/client/client.go index d319fc5c0b..af92fbf01d 100644 --- a/agent/client/client.go +++ b/agent/client/client.go @@ -580,7 +580,7 @@ func (c *Client) handleStartJobRequest(p *agentpb.StartJobRequest) error { Port: int(j.MysqlBackup.Port), Socket: j.MysqlBackup.Socket, } - job = jobs.NewMySQLBackupJob(p.JobId, timeout, j.MysqlBackup.Name, dbConnCfg, locationConfig) + job = jobs.NewMySQLBackupJob(p.JobId, timeout, j.MysqlBackup.Name, dbConnCfg, locationConfig, j.MysqlBackup.Folder) case *agentpb.StartJobRequest_MysqlRestoreBackup: var locationConfig jobs.BackupLocationConfig @@ -598,7 +598,7 @@ func (c *Client) handleStartJobRequest(p *agentpb.StartJobRequest) error { return errors.Errorf("unknown location config: %T", 
j.MysqlRestoreBackup.LocationConfig) } - job = jobs.NewMySQLRestoreJob(p.JobId, timeout, j.MysqlRestoreBackup.Name, locationConfig) + job = jobs.NewMySQLRestoreJob(p.JobId, timeout, j.MysqlRestoreBackup.Name, locationConfig, j.MysqlRestoreBackup.Folder) case *agentpb.StartJobRequest_MongodbBackup: var locationConfig jobs.BackupLocationConfig @@ -629,7 +629,8 @@ func (c *Client) handleStartJobRequest(p *agentpb.StartJobRequest) error { Port: int(j.MongodbBackup.Port), Socket: j.MongodbBackup.Socket, } - job, err = jobs.NewMongoDBBackupJob(p.JobId, timeout, j.MongodbBackup.Name, dbConnCfg, locationConfig, j.MongodbBackup.EnablePitr, j.MongodbBackup.DataModel) + job, err = jobs.NewMongoDBBackupJob(p.JobId, timeout, j.MongodbBackup.Name, dbConnCfg, locationConfig, + j.MongodbBackup.EnablePitr, j.MongodbBackup.DataModel, j.MongodbBackup.Folder) if err != nil { return err } @@ -663,7 +664,8 @@ func (c *Client) handleStartJobRequest(p *agentpb.StartJobRequest) error { } job = jobs.NewMongoDBRestoreJob(p.JobId, timeout, j.MongodbRestoreBackup.Name, - j.MongodbRestoreBackup.PitrTimestamp.AsTime(), dbConnCfg, locationConfig, c.supervisor) + j.MongodbRestoreBackup.PitrTimestamp.AsTime(), dbConnCfg, locationConfig, + c.supervisor, j.MongodbRestoreBackup.Folder, j.MongodbRestoreBackup.PbmMetadata.Name) default: return errors.Errorf("unknown job type: %T", j) } @@ -820,7 +822,7 @@ func dial(dialCtx context.Context, cfg *config.Config, l *logrus.Entry) (*dialRe }, nil } -func getNetworkInformation(channel *channel.Channel) (latency, clockDrift time.Duration, err error) { +func getNetworkInformation(channel *channel.Channel) (latency, clockDrift time.Duration, err error) { //nolint:nonamedreturns start := time.Now() var resp agentpb.ServerResponsePayload resp, err = channel.SendAndWaitResponse(&agentpb.Ping{}) @@ -845,7 +847,7 @@ func getNetworkInformation(channel *channel.Channel) (latency, clockDrift time.D } // GetNetworkInformation sends ping request to the server and returns 
info about latency and clock drift. -func (c *Client) GetNetworkInformation() (latency, clockDrift time.Duration, err error) { +func (c *Client) GetNetworkInformation() (latency, clockDrift time.Duration, err error) { //nolint:nonamedreturns c.rw.RLock() channel := c.channel c.rw.RUnlock() diff --git a/agent/client/pbm.go b/agent/client/pbm.go index 0b61716f09..3534b9d76f 100644 --- a/agent/client/pbm.go +++ b/agent/client/pbm.go @@ -19,6 +19,7 @@ import ( "os/exec" "path/filepath" "strconv" + "strings" "time" "github.com/pkg/errors" @@ -40,6 +41,9 @@ func (c *Client) handlePBMSwitchRequest(ctx context.Context, req *agentpb.PBMSwi return errors.WithStack(err) } + // TODO following line is a quick patch. Come up with something better. + dsn = strings.Replace(dsn, "directConnection=true", "directConnection=false", 1) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() diff --git a/agent/cmd/pmm-agent-entrypoint/main.go b/agent/cmd/pmm-agent-entrypoint/main.go index ee6d2d5329..636bbf874e 100644 --- a/agent/cmd/pmm-agent-entrypoint/main.go +++ b/agent/cmd/pmm-agent-entrypoint/main.go @@ -24,6 +24,7 @@ import ( "syscall" "time" + "github.com/pkg/errors" reaper "github.com/ramr/go-reaper" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -87,7 +88,7 @@ func runPmmAgent(ctx context.Context, commandLineArgs []string, restartPolicy re } else { pmmAgentProcessID = cmd.Process.Pid if err := cmd.Wait(); err != nil { - exitError, ok := err.(*exec.ExitError) + exitError, ok := err.(*exec.ExitError) //nolint:errorlint if !ok { l.Errorf("Can't get exit code for '%s'. 
err: %s", pmmAgentFullCommand, err) exitCode = -1 @@ -147,7 +148,7 @@ func main() { go func() { s := <-signals signal.Stop(signals) - l.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) + l.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert if pmmAgentProcessID != 0 { l.Info("Graceful shutdown for pmm-agent...") // graceful shutdown for pmm-agent @@ -179,7 +180,7 @@ func main() { os.Exit(1) } - if *pmmAgentSetup { + if *pmmAgentSetup { //nolint:nestif var agent *exec.Cmd restartPolicy := doNotRestart if *pmmAgentSidecar { @@ -204,7 +205,7 @@ func main() { } status = 0 - if *pmmAgentPrerunFile != "" || *pmmAgentPrerunScript != "" { + if *pmmAgentPrerunFile != "" || *pmmAgentPrerunScript != "" { //nolint:nestif l.Info("Starting pmm-agent for prerun...") agent := commandPmmAgent([]string{"run"}) err := agent.Start() @@ -218,7 +219,7 @@ func main() { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { + if exitError, ok := err.(*exec.ExitError); ok { //nolint:errorlint status = exitError.ExitCode() l.Infof("Prerun file exited with %d", exitError.ExitCode()) } @@ -231,7 +232,8 @@ func main() { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { + var exitError *exec.ExitError + if errors.As(err, &exitError) { status = exitError.ExitCode() l.Infof("Prerun shell script exited with %d", exitError.ExitCode()) } @@ -249,11 +251,12 @@ func main() { err = agent.Wait() if err != nil { - exitError, ok := err.(*exec.ExitError) - if !ok { - l.Warnf("Can't get exit code for pmm-agent. Error code: %s", err) - } else { + var exitError *exec.ExitError + if errors.As(err, &exitError) { + status = exitError.ExitCode() l.Infof("Prerun pmm-agent exited with %d", exitError.ExitCode()) + } else { + l.Warnf("Can't get exit code for pmm-agent. 
Error code: %s", err) } } timer.Stop() diff --git a/agent/commands/clients.go b/agent/commands/clients.go index 2277ff7380..43c4ae949e 100644 --- a/agent/commands/clients.go +++ b/agent/commands/clients.go @@ -54,7 +54,7 @@ func setLocalTransport(host string, port uint16, l *logrus.Entry) { transport.Context = context.Background() // disable HTTP/2 - httpTransport := transport.Transport.(*http.Transport) + httpTransport := transport.Transport.(*http.Transport) //nolint:forcetypeassert httpTransport.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) agentlocalpb.Default.SetTransport(transport) @@ -124,7 +124,7 @@ func setServerTransport(u *url.URL, insecureTLS bool, l *logrus.Entry) { } // disable HTTP/2, set TLS config - httpTransport := transport.Transport.(*http.Transport) + httpTransport := transport.Transport.(*http.Transport) //nolint:forcetypeassert httpTransport.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) if u.Scheme == "https" { httpTransport.TLSClientConfig = tlsconfig.Get() @@ -160,7 +160,7 @@ func ParseCustomLabels(labels string) (map[string]string, error) { // serverRegister registers Node on PMM Server. // // This method is not thread-safe. 
-func serverRegister(cfgSetup *config.Setup) (agentID, token string, _ error) { +func serverRegister(cfgSetup *config.Setup) (agentID, token string, _ error) { //nolint:nonamedreturns nodeTypes := map[string]string{ "generic": node.RegisterNodeBodyNodeTypeGENERICNODE, "container": node.RegisterNodeBodyNodeTypeCONTAINERNODE, diff --git a/agent/commands/run.go b/agent/commands/run.go index d208acb12e..f5acd13386 100644 --- a/agent/commands/run.go +++ b/agent/commands/run.go @@ -54,7 +54,7 @@ func Run() { go func() { s := <-signals signal.Stop(signals) - l.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) + l.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert cancel() }() diff --git a/agent/commands/setup.go b/agent/commands/setup.go index cd8a410b0b..df79e2076d 100644 --- a/agent/commands/setup.go +++ b/agent/commands/setup.go @@ -93,7 +93,7 @@ func checkStatus(configFilepath string, l *logrus.Entry) (string, bool) { fmt.Printf("Checking local pmm-agent status...\n") status, err := localStatus() l.Debugf("Status error: %#v", err) - switch err := err.(type) { + switch err := err.(type) { //nolint:errorlint case nil: if status.ConfigFilepath == "" { fmt.Printf("pmm-agent is running but does not read configuration from the file. " + @@ -141,7 +141,7 @@ func register(cfg *config.Config, l *logrus.Entry) { l.Debugf("Register error: %#v", err) if err != nil { msg := err.Error() - if e, _ := err.(*node.RegisterNodeDefault); e != nil { + if e, _ := err.(*node.RegisterNodeDefault); e != nil { //nolint:errorlint msg = e.Payload.Message + "" switch e.Code() { case http.StatusConflict: @@ -150,7 +150,7 @@ func register(cfg *config.Config, l *logrus.Entry) { msg += "\nPlease check username and password" } } - if _, ok := err.(nginxError); ok { + if _, ok := err.(nginxError); ok { //nolint:errorlint msg += ".\nPlease check pmm-managed logs." 
} @@ -173,7 +173,7 @@ func reload(l *logrus.Entry) { // sync error handling with Reload API method err := localReload() l.Debugf("Reload error: %#v", err) - if err, _ := err.(*agent_local.ReloadDefault); err != nil && err.Code() == int(codes.FailedPrecondition) { + if err, _ := err.(*agent_local.ReloadDefault); err != nil && err.Code() == int(codes.FailedPrecondition) { //nolint:errorlint fmt.Printf("Failed to reload configuration: %s.\n", err.Payload.Message) os.Exit(1) } diff --git a/agent/config/config.go b/agent/config/config.go index 033c3914ac..eebca14167 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -179,7 +179,7 @@ func getFromCmdLine(cfg *Config, l *logrus.Entry) (string, error) { } // get is Get for unit tests: it parses args instead of command-line. -func get(args []string, cfg *Config, l *logrus.Entry) (configFileF string, err error) { +func get(args []string, cfg *Config, l *logrus.Entry) (configFileF string, err error) { //nolint:nonamedreturns // tweak configuration on exit to cover all return points defer func() { if cfg == nil { diff --git a/agent/config/config_test.go b/agent/config/config_test.go index 56e0a0e486..7d3feba468 100644 --- a/agent/config/config_test.go +++ b/agent/config/config_test.go @@ -63,8 +63,8 @@ func TestLoadFromFile(t *testing.T) { cfg, err := loadFromFile(name) require.IsType(t, (*os.PathError)(nil), err) - assert.Equal(t, "open", err.(*os.PathError).Op) - assert.EqualError(t, err.(*os.PathError).Err, `permission denied`) + assert.Equal(t, "open", err.(*os.PathError).Op) //nolint:errorlint + assert.EqualError(t, err.(*os.PathError).Err, `permission denied`) //nolint:errorlint assert.Nil(t, cfg) }) diff --git a/agent/config/storage.go b/agent/config/storage.go index 365eed63a5..d3c8630ee2 100644 --- a/agent/config/storage.go +++ b/agent/config/storage.go @@ -49,7 +49,7 @@ func (s *Storage) Reload(l *logrus.Entry) (string, error) { newCfg := &Config{} cfgPath, err := getFromCmdLine(newCfg, l) if err != 
nil { - if _, ok := err.(ConfigFileDoesNotExistError); !ok { + if _, ok := err.(ConfigFileDoesNotExistError); !ok { //nolint:errorlint return cfgPath, err } } diff --git a/agent/connectionchecker/connection_checker.go b/agent/connectionchecker/connection_checker.go index 3c172e146b..6d6ef9ecaf 100644 --- a/agent/connectionchecker/connection_checker.go +++ b/agent/connectionchecker/connection_checker.go @@ -194,9 +194,9 @@ func (cc *ConnectionChecker) checkMongoDBConnection(ctx context.Context, dsn str return &res } - resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "listDatabases", Value: 1}}) + resp := client.Database("admin").RunCommand(ctx, bson.D{{Key: "getDiagnosticData", Value: 1}}) if err = resp.Err(); err != nil { - cc.l.Debugf("checkMongoDBConnection: failed to runCommand listDatabases: %s", err) + cc.l.Debugf("checkMongoDBConnection: failed to runCommand getDiagnosticData: %s", err) res.Error = err.Error() return &res } diff --git a/agent/connectionchecker/connection_checker_test.go b/agent/connectionchecker/connection_checker_test.go index c2551adbf6..94aef528fd 100644 --- a/agent/connectionchecker/connection_checker_test.go +++ b/agent/connectionchecker/connection_checker_test.go @@ -99,9 +99,9 @@ func TestConnectionChecker(t *testing.T) { Type: inventorypb.ServiceType_MONGODB_SERVICE, Timeout: durationpb.New(3 * time.Second), }, - expectedErr: `\(Unauthorized\) (?:command listDatabases requires authentication|` + + expectedErr: `\(Unauthorized\) (?:command getDiagnosticData requires authentication|` + `there are no users authenticated|` + - `not authorized on admin to execute command \{ listDatabases\: 1 \})`, + `not authorized on admin to execute command \{ getDiagnosticData\: 1 \})`, }, { name: "MongoDB wrong params", diff --git a/agent/docker-compose-pg-load.yml b/agent/docker-compose-pg-load.yml index 76519d86d1..53f013f437 100644 --- a/agent/docker-compose-pg-load.yml +++ b/agent/docker-compose-pg-load.yml @@ -4,7 +4,7 @@ version: 
'3.7' services: postgres-pgmonitor: - image: ${POSTGRES_IMAGE:-perconalab/percona-distribution-postgresql:13.3} + image: ${POSTGRES_IMAGE:-perconalab/percona-distribution-postgresql:14.7} container_name: pmm-agent-postgres-pgmonitor command: > -c shared_preload_libraries=pg_stat_monitor @@ -21,7 +21,7 @@ services: - test_db_postgres:/docker-entrypoint-initdb.d/ postgres-load: - image: ${POSTGRES_IMAGE:-perconalab/percona-distribution-postgresql:13.3} + image: ${POSTGRES_IMAGE:-perconalab/percona-distribution-postgresql:14.7} container_name: pmm-agent-postgres-load depends_on: - postgres-pgmonitor diff --git a/agent/main.go b/agent/main.go index 2bbeeadb3d..0a5ea9a0dc 100644 --- a/agent/main.go +++ b/agent/main.go @@ -41,16 +41,15 @@ func main() { FullTimestamp: true, TimestampFormat: "2006-01-02T15:04:05.000-07:00", - CallerPrettyfier: func(f *runtime.Frame) (function string, file string) { - _, function = filepath.Split(f.Function) + CallerPrettyfier: func(f *runtime.Frame) (string, string) { + _, function := filepath.Split(f.Function) // keep a single directory name as a compromise between brevity and unambiguity - var dir string - dir, file = filepath.Split(f.File) + dir, file := filepath.Split(f.File) dir = filepath.Base(dir) file = fmt.Sprintf("%s/%s:%d", dir, file, f.Line) - return + return function, file }, }) diff --git a/agent/queryparser/parser.go b/agent/queryparser/parser.go index f38ba4af53..b3f4b9dae8 100644 --- a/agent/queryparser/parser.go +++ b/agent/queryparser/parser.go @@ -12,30 +12,55 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package queryparser provides functionality for queries parsing. +// Package queryparser provides functionality for queries fingerprint and placeholders parsing. 
package queryparser import ( - "github.com/pkg/errors" - "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" + "fmt" + "regexp" + "strings" ) -// MySQL parse query and return fingeprint and placeholders. -func MySQL(q string) (string, uint32, error) { - normalizedQuery, _, err := sqlparser.Parse2(q) - if err != nil { - return "", 0, errors.Wrap(err, "cannot parse query") +var ( + allStringsRegexp = regexp.MustCompile(`'.*?'|".*?"`) + braceletsRegexp = regexp.MustCompile(`\(.*?\)`) + braceletsMultiformRegexp = regexp.MustCompile(`\(\?\+\)|\(\.\.\.\)`) + decimalsPlaceholdersRegexp = regexp.MustCompile(`:\d+`) +) + +// GetMySQLFingerprintPlaceholders parse query and digest text and return fingerprint and placeholders count. +func GetMySQLFingerprintPlaceholders(query, digestText string) (string, uint32) { + queryWithoutStrings := allStringsRegexp.ReplaceAllString(query, "") + contents := make(map[int]string) + bracelets := braceletsRegexp.FindAllString(queryWithoutStrings, -1) + for k, v := range bracelets { + count := strings.Count(v, ",") + contents[k] = fmt.Sprintf("(%s?)", strings.Repeat("?, ", count)) } - bv := make(map[string]*query.BindVariable) - err = sqlparser.Normalize(normalizedQuery, sqlparser.NewReservedVars("", sqlparser.GetBindvars(normalizedQuery)), bv) - if err != nil { - return "", 0, errors.Wrap(err, "cannot normalize query") + i := 0 + result := braceletsMultiformRegexp.ReplaceAllStringFunc(digestText, func(s string) string { + c := contents[i] + i++ + return c + }) + + var count uint32 + for { + index := strings.Index(result, "?") + if index == -1 { + break + } + + count++ + result = strings.Replace(result, "?", fmt.Sprintf(":%d", count), 1) } - parsedQuery := sqlparser.NewParsedQuery(normalizedQuery) - bindVars := sqlparser.GetBindvars(normalizedQuery) + return strings.TrimSpace(result), count +} - return parsedQuery.Query, uint32(len(bindVars)), nil +// GetMySQLFingerprintFromExplainFingerprint convert placeholders in 
fingerprint from our format (:1, :2 etc) into ? +// to make it compatible with sql.Query functions. +func GetMySQLFingerprintFromExplainFingerprint(explainFingerprint string) string { + return decimalsPlaceholdersRegexp.ReplaceAllString(explainFingerprint, "?") } diff --git a/agent/queryparser/parser_test.go b/agent/queryparser/parser_test.go index 5b2f5e5027..f68fafe430 100644 --- a/agent/queryparser/parser_test.go +++ b/agent/queryparser/parser_test.go @@ -18,56 +18,64 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type test struct { Query string - ExpectedQuery string + DigestText string + ExpectedFingerprint string ExpectedPlaceHoldersCount uint32 } func TestMySQL(t *testing.T) { sqls := []test{ { - Query: "SELECT name FROM people where city = 'Paris'", - ExpectedQuery: "select `name` from people where city = :1", + Query: "SELECT /* Sleep */ sleep(0.1)", + DigestText: "SELECT `sleep` (?)", + ExpectedFingerprint: "SELECT `sleep` (:1)", ExpectedPlaceHoldersCount: 1, }, { - Query: "SELECT name FROM people where city = ?", - ExpectedQuery: "select `name` from people where city = :v1", - ExpectedPlaceHoldersCount: 1, + Query: "SELECT `city` . `CountryCode` , `city` . `Name` FROM `world` . `city` WHERE NAME IN ('? ? ??? (...)', \"(?+)\") LIMIT ?", + DigestText: "SELECT `city` . `CountryCode` , `city` . `Name` FROM `world` . `city` WHERE NAME IN (...) LIMIT ?", + ExpectedFingerprint: "SELECT `city` . `CountryCode` , `city` . `Name` FROM `world` . `city` WHERE NAME IN (:1, :2) LIMIT :3", + ExpectedPlaceHoldersCount: 3, }, { - Query: "INSERT INTO people VALUES('John', 'Paris', 70010)", - ExpectedQuery: "insert into people values (:1, :2, :3)", + Query: "SELECT SCHEMA_NAME FROM information_schema.schemata WHERE SCHEMA_NAME NOT IN ('mysql', 'performance_schema', 'information_schema')", + DigestText: "SELECT SCHEMA_NAME FROM `information_schema` . 
`schemata` WHERE SCHEMA_NAME NOT IN (...)", + ExpectedFingerprint: "SELECT SCHEMA_NAME FROM `information_schema` . `schemata` WHERE SCHEMA_NAME NOT IN (:1, :2, :3)", ExpectedPlaceHoldersCount: 3, }, { - Query: "INSERT INTO people VALUES(?, ?, ?)", - ExpectedQuery: "insert into people values (:v1, :v2, :v3)", - ExpectedPlaceHoldersCount: 3, + Query: "SELECT productVendor, COUNT(*) FROM products GROUP BY productVendor HAVING COUNT(*) >= 9 ORDER BY COUNT(*) DESC;", + DigestText: "SELECT `productVendor` , COUNT ( * ) FROM `products` GROUP BY `productVendor` HAVING COUNT ( * ) >= ? ORDER BY COUNT ( * ) DESC ;", + ExpectedFingerprint: "SELECT `productVendor` , COUNT ( * ) FROM `products` GROUP BY `productVendor` HAVING COUNT ( * ) >= :1 ORDER BY COUNT ( * ) DESC ;", + ExpectedPlaceHoldersCount: 1, + }, + { + Query: "INSERT INTO sbtest1 (id, k, c, pad) VALUES (4062, 72, '80700175623-243441', '76422972981-022')", + DigestText: "INSERT INTO `sbtest1` ( `id` , `k` , `c` , `pad` ) VALUES (...)", + ExpectedFingerprint: "INSERT INTO `sbtest1` ( `id` , `k` , `c` , `pad` ) VALUES (:1, :2, :3, :4)", + ExpectedPlaceHoldersCount: 4, + }, + { + Query: "INSERT INTO sbtest1 (id, k, c, pad) VALUES (4062, 72, '80700175623-243441', '76422972981-022')", + DigestText: "INSERT INTO `sbtest1` ( `id` , `k` , `c` , `pad` ) VALUES (?+)", + ExpectedFingerprint: "INSERT INTO `sbtest1` ( `id` , `k` , `c` , `pad` ) VALUES (:1, :2, :3, :4)", + ExpectedPlaceHoldersCount: 4, }, { - Query: `SELECT t.table_schema, t.table_name, column_name, auto_increment, pow(2, case data_type when - 'tinyint' then 7 when 'smallint' then 15 when 'mediumint' then 23 when 'int' then 31 when 'bigint' then 63 - end +(column_type like '% unsigned')) -1 as max_int FROM information_schema.columns c STRAIGHT_JOIN - information_schema.tables t ON BINARY t.table_schema = c.table_schema AND BINARY t.table_name = c.table_name - WHERE c.extra = 'auto_increment' AND t.auto_increment IS NOT NULL`, - ExpectedQuery: "select 
t.table_schema, t.table_name, column_name, `auto_increment`, pow(:1, case " + - "data_type when :2 then :3 when :4 then :5 when :6 then :7 when :8 then :9 when :10 then :11 end + " + - "(column_type like :12)) - :13 as max_int from information_schema.`columns` as c straight_join information_schema.`tables` " + - "as t on convert(t.table_schema, BINARY) = c.table_schema and convert(t.table_name, BINARY) = c.table_name where c.extra = :14 " + - "and t.`auto_increment` is not null", - ExpectedPlaceHoldersCount: 14, + Query: "SELECT c FROM sbtest1 WHERE id BETWEEN 1 AND 100", + DigestText: "select c from sbtest1 where id between ? and ?", + ExpectedFingerprint: "select c from sbtest1 where id between :1 and :2", + ExpectedPlaceHoldersCount: 2, }, } for _, sql := range sqls { - query, placeholdersCount, err := MySQL(sql.Query) - require.NoError(t, err) - assert.Equal(t, sql.ExpectedQuery, query) + query, placeholdersCount := GetMySQLFingerprintPlaceholders(sql.Query, sql.DigestText) + assert.Equal(t, sql.ExpectedFingerprint, query) assert.Equal(t, sql.ExpectedPlaceHoldersCount, placeholdersCount) } } diff --git a/agent/runner/actions/common.go b/agent/runner/actions/common.go index f5f9cdf7ab..adff5d5bd0 100644 --- a/agent/runner/actions/common.go +++ b/agent/runner/actions/common.go @@ -19,6 +19,7 @@ import ( "database/sql" "encoding/json" "fmt" + "regexp" "strings" "github.com/go-sql-driver/mysql" @@ -30,6 +31,8 @@ import ( const queryTag = "pmm-agent-tests:MySQLVersion" +var whiteSpacesRegExp = regexp.MustCompile(`\s+`) + // jsonRows converts input to JSON array: // [ // @@ -84,6 +87,7 @@ func prepareRealTableName(name string) string { } func parseRealTableName(query string) string { + query = whiteSpacesRegExp.ReplaceAllString(query, " ") // due to historical reasons we parsing only one table name keyword := "FROM " @@ -98,16 +102,16 @@ func parseRealTableName(query string) string { parsed = strings.ReplaceAll(parsed, ";", "") index = strings.Index(parsed, " ") if 
index == -1 { - return prepareRealTableName(parsed) + return strings.TrimSpace(parsed) } - return prepareRealTableName(parsed[:index+1]) + return strings.TrimSpace(parsed[:index+1]) } func prepareQueryWithDatabaseTableName(query, name string) string { // use %#q to convert "table" to `"table"` and `table` to "`table`" to avoid SQL injections q := fmt.Sprintf("%s %#q", query, prepareRealTableName(name)) - if strings.Index(q, ".") == -1 { + if !strings.Contains(q, ".") { return q } diff --git a/agent/runner/actions/mysql_explain_action.go b/agent/runner/actions/mysql_explain_action.go index e729d95e5a..ffbafcdf89 100644 --- a/agent/runner/actions/mysql_explain_action.go +++ b/agent/runner/actions/mysql_explain_action.go @@ -26,6 +26,7 @@ import ( "github.com/pkg/errors" + "github.com/percona/pmm/agent/queryparser" "github.com/percona/pmm/agent/tlshelpers" "github.com/percona/pmm/api/agentpb" "github.com/percona/pmm/utils/sqlrows" @@ -82,16 +83,14 @@ func (a *mysqlExplainAction) Run(ctx context.Context) ([]byte, error) { return nil, errors.New("Query to EXPLAIN is empty") } - // Workaround for bug in our MySQL parser if there is keyword "IN" in query. - // TODO In future it should be fixed on parser side. - a.params.Query = strings.ReplaceAll(a.params.Query, "...", "?") - // Explain is supported only for DML queries. // https://dev.mysql.com/doc/refman/8.0/en/using-explain.html if !isDMLQuery(a.params.Query) { return nil, errors.New("Functionality EXPLAIN is supported only for DML queries (SELECT, INSERT, UPDATE, DELETE, REPLACE)") } + a.params.Query = queryparser.GetMySQLFingerprintFromExplainFingerprint(a.params.Query) + // query has a copy of the original params.Query field if the query is a SELECT or the equivalent // SELECT after converting DML queries. 
query, changedToSelect := dmlToSelect(a.params.Query) diff --git a/agent/runner/actions/mysql_explain_action_test.go b/agent/runner/actions/mysql_explain_action_test.go index e1931e1c9b..1e32edfd8d 100644 --- a/agent/runner/actions/mysql_explain_action_test.go +++ b/agent/runner/actions/mysql_explain_action_test.go @@ -294,19 +294,19 @@ func TestParseRealTableNameMySQL(t *testing.T) { tests := []testCase{ {"SELECT;", ""}, - {"SELECT `district` FROM `people`;", "people"}, - {"SELECT `district` FROM `people`", "people"}, + {"SELECT `district` FROM `people`;", "`people`"}, + {"SELECT `district` FROM `people`", "`people`"}, {"SELECT `district` FROM people", "people"}, {"SELECT name FROM people WHERE city = 'Paris'", "people"}, {"SELECT name FROM world.people WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM `world`.`people` WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM `world` . `people` WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM \"world\".\"people\" WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM \"world\" . \"people\" WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM 'world'.'people' WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM 'world' . 'people' WHERE city = 'Paris'", "world.people"}, - {"SELECT name FROM 'world' . \"people\" WHERE city = `Paris`", "world.people"}, - {"SELECT DATE(`date`) AS `date` FROM (SELECT MIN(`date`) AS `date`, `player_name` FROM `people` GROUP BY `player_name`) AS t GROUP BY DATE(`date`);", "people"}, + {"SELECT name FROM `world`.`people` WHERE city = 'Paris'", "`world`.`people`"}, + {"SELECT name FROM `world` . `people` WHERE city = 'Paris'", "`world`.`people`"}, + {"SELECT name FROM \"world\".\"people\" WHERE city = 'Paris'", "\"world\".\"people\""}, + {"SELECT name FROM \"world\" . \"people\" WHERE city = 'Paris'", "\"world\".\"people\""}, + {"SELECT name FROM 'world'.'people' WHERE city = 'Paris'", "'world'.'people'"}, + {"SELECT name FROM 'world' . 
'people' WHERE city = 'Paris'", "'world'.'people'"}, + {"SELECT name FROM 'world' . \"people\" WHERE city = `Paris`", "'world'.\"people\""}, + {"SELECT DATE(`date`) AS `date` FROM (SELECT MIN(`date`) AS `date`, `player_name` FROM `people` GROUP BY `player_name`) AS t GROUP BY DATE(`date`);", "`people`"}, } for _, test := range tests { diff --git a/agent/runner/actions/mysql_show_table_status_action.go b/agent/runner/actions/mysql_show_table_status_action.go index 49b0f3fa68..2baa53e772 100644 --- a/agent/runner/actions/mysql_show_table_status_action.go +++ b/agent/runner/actions/mysql_show_table_status_action.go @@ -67,9 +67,9 @@ func (a *mysqlShowTableStatusAction) Run(ctx context.Context) ([]byte, error) { defer db.Close() //nolint:errcheck defer tlshelpers.DeregisterMySQLCerts() - table := a.params.Table - if containsDB := strings.Contains(a.params.Table, "."); containsDB { - split := strings.Split(a.params.Table, ".") + table := prepareRealTableName(a.params.Table) + if containsDB := strings.Contains(table, "."); containsDB { + split := strings.Split(table, ".") if len(split) > 1 { useQuery := fmt.Sprintf("USE /* pmm-agent */ %s;", split[0]) table = split[1] diff --git a/agent/runner/actions/mysql_show_table_status_action_test.go b/agent/runner/actions/mysql_show_table_status_action_test.go index af8a49af8e..17680ed485 100644 --- a/agent/runner/actions/mysql_show_table_status_action_test.go +++ b/agent/runner/actions/mysql_show_table_status_action_test.go @@ -101,7 +101,7 @@ func TestShowTableStatus(t *testing.T) { defer cancel() _, err := a.Run(ctx) - assert.EqualError(t, err, `table "city\"; DROP TABLE city; --" not found`) + assert.EqualError(t, err, `table "city; DROP TABLE city; --" not found`) var count int err = db.QueryRow("SELECT COUNT(*) FROM city").Scan(&count) diff --git a/agent/runner/actions/postgresql_show_create_table_action.go b/agent/runner/actions/postgresql_show_create_table_action.go index 5729f7e095..3a33adce77 100644 --- 
a/agent/runner/actions/postgresql_show_create_table_action.go +++ b/agent/runner/actions/postgresql_show_create_table_action.go @@ -299,6 +299,7 @@ ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname`, tableID) } fmt.Fprintf(bw, "\t%q", info.Relname) + //nolint:nestif if pointer.GetString(info.Contype) == "x" { fmt.Fprintf(bw, " %s", pointer.GetString(info.PgGetConstraintDef)) } else { diff --git a/agent/runner/jobs/errors.go b/agent/runner/jobs/errors.go new file mode 100644 index 0000000000..21a320008a --- /dev/null +++ b/agent/runner/jobs/errors.go @@ -0,0 +1,20 @@ +// Copyright 2019 Percona LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jobs + +import "github.com/pkg/errors" + +// ErrNotFound returned when backup record is not found by backup tool. +var ErrNotFound = errors.New("backup record not found by backup tool") diff --git a/agent/runner/jobs/mongodb_backup_job.go b/agent/runner/jobs/mongodb_backup_job.go index d6b2e5fd42..7a0c342aa3 100644 --- a/agent/runner/jobs/mongodb_backup_job.go +++ b/agent/runner/jobs/mongodb_backup_job.go @@ -35,6 +35,8 @@ const ( logsCheckInterval = 3 * time.Second waitForLogs = 2 * logsCheckInterval + + pbmArtifactJSONPostfix = ".pbm.json" ) // MongoDBBackupJob implements Job from MongoDB backup. @@ -48,6 +50,7 @@ type MongoDBBackupJob struct { pitr bool dataModel backuppb.DataModel jobLogger *pbmJobLogger + folder string } // NewMongoDBBackupJob creates new Job for MongoDB backup. 
@@ -59,6 +62,7 @@ func NewMongoDBBackupJob( locationConfig BackupLocationConfig, pitr bool, dataModel backuppb.DataModel, + folder string, ) (*MongoDBBackupJob, error) { if dataModel != backuppb.DataModel_PHYSICAL && dataModel != backuppb.DataModel_LOGICAL { return nil, errors.Errorf("'%s' is not a supported data model for MongoDB backups", dataModel) @@ -78,6 +82,7 @@ func NewMongoDBBackupJob( pitr: pitr, dataModel: dataModel, jobLogger: newPbmJobLogger(id, pbmBackupJob, dbURL), + folder: folder, }, nil } @@ -104,7 +109,7 @@ func (j *MongoDBBackupJob) Run(ctx context.Context, send Send) error { return errors.Wrapf(err, "lookpath: %s", pbmBin) } - conf, err := createPBMConfig(&j.locationConfig, j.name, j.pitr) + conf, err := createPBMConfig(&j.locationConfig, j.folder, j.pitr) if err != nil { return errors.WithStack(err) } @@ -149,11 +154,40 @@ func (j *MongoDBBackupJob) Run(ctx context.Context, send Send) error { j.jobLogger.sendLog(send, err.Error(), false) return errors.Wrap(err, "failed to wait backup completion") } + + sharded, err := isShardedCluster(ctx, j.dbURL) + if err != nil { + return err + } + + backupTimestamp, err := pbmGetSnapshotTimestamp(ctx, j.dbURL, pbmBackupOut.Name) + if err != nil { + return err + } + + // mongoArtifactFiles returns list of files and folders the backup consists of (hardcoded). 
+ mongoArtifactFiles := func(pbmBackupName string) []*backuppb.File { + res := []*backuppb.File{ + {Name: pbmBackupName + pbmArtifactJSONPostfix}, + {Name: pbmBackupName, IsDirectory: true}, + } + return res + } + send(&agentpb.JobResult{ JobId: j.id, Timestamp: timestamppb.Now(), Result: &agentpb.JobResult_MongodbBackup{ - MongodbBackup: &agentpb.JobResult_MongoDBBackup{}, + MongodbBackup: &agentpb.JobResult_MongoDBBackup{ + IsShardedCluster: sharded, + Metadata: &backuppb.Metadata{ + FileList: mongoArtifactFiles(pbmBackupOut.Name), + RestoreTo: timestamppb.New(*backupTimestamp), + BackupToolMetadata: &backuppb.Metadata_PbmMetadata{ + PbmMetadata: &backuppb.PbmMetadata{Name: pbmBackupOut.Name}, + }, + }, + }, }, }) diff --git a/agent/runner/jobs/mongodb_backup_job_test.go b/agent/runner/jobs/mongodb_backup_job_test.go index d8cc5b1ba2..68dcebfbcb 100644 --- a/agent/runner/jobs/mongodb_backup_job_test.go +++ b/agent/runner/jobs/mongodb_backup_job_test.go @@ -118,7 +118,7 @@ func TestNewMongoDBBackupJob(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - _, err := NewMongoDBBackupJob(t.Name(), testJobDuration, t.Name(), tc.dbConfig, BackupLocationConfig{}, tc.pitr, tc.dataModel) + _, err := NewMongoDBBackupJob(t.Name(), testJobDuration, t.Name(), tc.dbConfig, BackupLocationConfig{}, tc.pitr, tc.dataModel, "artifact_folder") if tc.errMsg == "" { assert.NoError(t, err) } else { diff --git a/agent/runner/jobs/mongodb_restore_job.go b/agent/runner/jobs/mongodb_restore_job.go index 621c6dc0c4..4b67202adb 100644 --- a/agent/runner/jobs/mongodb_restore_job.go +++ b/agent/runner/jobs/mongodb_restore_job.go @@ -47,6 +47,8 @@ type MongoDBRestoreJob struct { locationConfig BackupLocationConfig agentsRestarter agentsRestarter jobLogger *pbmJobLogger + folder string + pbmBackupName string } // NewMongoDBRestoreJob creates new Job for MongoDB backup restore. 
@@ -58,6 +60,8 @@ func NewMongoDBRestoreJob( dbConfig DBConnConfig, locationConfig BackupLocationConfig, restarter agentsRestarter, + folder string, + pbmBackupName string, ) *MongoDBRestoreJob { dbURL := createDBURL(dbConfig) return &MongoDBRestoreJob{ @@ -70,6 +74,8 @@ func NewMongoDBRestoreJob( locationConfig: locationConfig, agentsRestarter: restarter, jobLogger: newPbmJobLogger(id, pbmRestoreJob, dbURL), + folder: folder, + pbmBackupName: pbmBackupName, } } @@ -96,7 +102,14 @@ func (j *MongoDBRestoreJob) Run(ctx context.Context, send Send) error { return errors.Wrapf(err, "lookpath: %s", pbmBin) } - conf, err := createPBMConfig(&j.locationConfig, j.name, false) + artifactFolder := j.folder + + // Old artifacts don't contain pbm backup name. + if j.pbmBackupName == "" { + artifactFolder = j.name + } + + conf, err := createPBMConfig(&j.locationConfig, artifactFolder, false) if err != nil { return errors.WithStack(err) } @@ -123,7 +136,7 @@ func (j *MongoDBRestoreJob) Run(ctx context.Context, send Send) error { } cancel() - snapshot, err := j.findSnapshot(ctx) + snapshot, err := j.findSnapshot(ctx, j.pbmBackupName) if err != nil { j.jobLogger.sendLog(send, err.Error(), false) return errors.WithStack(err) @@ -161,7 +174,7 @@ func (j *MongoDBRestoreJob) Run(ctx context.Context, send Send) error { return nil } -func (j *MongoDBRestoreJob) findSnapshot(ctx context.Context) (*pbmSnapshot, error) { +func (j *MongoDBRestoreJob) findSnapshot(ctx context.Context, snapshotName string) (*pbmSnapshot, error) { j.l.Info("Finding backup entity name.") var list pbmList @@ -180,12 +193,21 @@ func (j *MongoDBRestoreJob) findSnapshot(ctx context.Context) (*pbmSnapshot, err if len(list.Snapshots) == 0 { j.l.Debugf("Try number %d of getting list of artifacts from PBM is failed.", checks) if checks > maxListChecks { - return nil, errors.New("failed to find backup entity") + return nil, errors.Wrap(ErrNotFound, "got no one snapshot") } continue } - return 
&list.Snapshots[len(list.Snapshots)-1], nil + // Old artifacts don't contain pbm backup name. + if snapshotName == "" { + return &list.Snapshots[len(list.Snapshots)-1], nil + } + + for _, s := range list.Snapshots { + if s.Name == snapshotName { + return &s, nil + } + } case <-ctx.Done(): return nil, ctx.Err() } diff --git a/agent/runner/jobs/mysql_backup_job.go b/agent/runner/jobs/mysql_backup_job.go index 438c2844f5..ac1dc27896 100644 --- a/agent/runner/jobs/mysql_backup_job.go +++ b/agent/runner/jobs/mysql_backup_job.go @@ -19,6 +19,7 @@ import ( "context" "os" "os/exec" + "path" "strconv" "time" @@ -27,6 +28,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/percona/pmm/api/agentpb" + backuppb "github.com/percona/pmm/api/managementpb/backup" ) const ( @@ -43,10 +45,11 @@ type MySQLBackupJob struct { name string connConf DBConnConfig locationConfig BackupLocationConfig + folder string } // NewMySQLBackupJob constructs new Job for MySQL backup. -func NewMySQLBackupJob(id string, timeout time.Duration, name string, connConf DBConnConfig, locationConfig BackupLocationConfig) *MySQLBackupJob { +func NewMySQLBackupJob(id string, timeout time.Duration, name string, connConf DBConnConfig, locationConfig BackupLocationConfig, folder string) *MySQLBackupJob { return &MySQLBackupJob{ id: id, timeout: timeout, @@ -54,6 +57,7 @@ func NewMySQLBackupJob(id string, timeout time.Duration, name string, connConf D name: name, connConf: connConf, locationConfig: locationConfig, + folder: folder, } } @@ -82,11 +86,23 @@ func (j *MySQLBackupJob) Run(ctx context.Context, send Send) error { return errors.WithStack(err) } + // mysqlArtifactFiles returns list of files and folders the backup consists of (hardcoded). 
+ mysqlArtifactFiles := func(backupFolder string) []*backuppb.File { + res := []*backuppb.File{ + {Name: backupFolder, IsDirectory: true}, + } + return res + } + send(&agentpb.JobResult{ JobId: j.id, Timestamp: timestamppb.Now(), Result: &agentpb.JobResult_MysqlBackup{ - MysqlBackup: &agentpb.JobResult_MySQLBackup{}, + MysqlBackup: &agentpb.JobResult_MySQLBackup{ + Metadata: &backuppb.Metadata{ + FileList: mysqlArtifactFiles(j.name), + }, + }, }, }) @@ -153,6 +169,11 @@ func (j *MySQLBackupJob) backup(ctx context.Context) (rerr error) { switch { case j.locationConfig.Type == S3BackupLocationType: xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--stream=xbstream") + + artifactFolder := path.Join(j.folder, j.name) + + j.l.Debugf("Artifact folder is: %s", artifactFolder) + xbcloudCmd = exec.CommandContext(pipeCtx, xbcloudBin, "put", "--storage=s3", @@ -162,7 +183,7 @@ func (j *MySQLBackupJob) backup(ctx context.Context) (rerr error) { "--s3-bucket="+j.locationConfig.S3Config.BucketName, "--s3-region="+j.locationConfig.S3Config.BucketRegion, "--parallel=10", - j.name) // #nosec G204 + artifactFolder) // #nosec G204 default: return errors.Errorf("unknown location config") } diff --git a/agent/runner/jobs/mysql_restore_job.go b/agent/runner/jobs/mysql_restore_job.go index 1427e77c9c..e355b9fee6 100644 --- a/agent/runner/jobs/mysql_restore_job.go +++ b/agent/runner/jobs/mysql_restore_job.go @@ -21,6 +21,7 @@ import ( "os" "os/exec" "os/user" + "path" "path/filepath" "regexp" "strconv" @@ -51,16 +52,18 @@ type MySQLRestoreJob struct { l logrus.FieldLogger name string locationConfig BackupLocationConfig + folder string } // NewMySQLRestoreJob constructs new Job for MySQL backup restore. 
-func NewMySQLRestoreJob(id string, timeout time.Duration, name string, locationConfig BackupLocationConfig) *MySQLRestoreJob { +func NewMySQLRestoreJob(id string, timeout time.Duration, name string, locationConfig BackupLocationConfig, folder string) *MySQLRestoreJob { return &MySQLRestoreJob{ id: id, timeout: timeout, l: logrus.WithFields(logrus.Fields{"id": id, "type": "mysql_restore"}), name: name, locationConfig: locationConfig, + folder: folder, } } @@ -80,7 +83,7 @@ func (j *MySQLRestoreJob) Timeout() time.Duration { } // Run executes backup restore steps. -func (j *MySQLRestoreJob) Run(ctx context.Context, send Send) (rerr error) { +func (j *MySQLRestoreJob) Run(ctx context.Context, send Send) error { if j.locationConfig.S3Config == nil { return errors.New("S3 config is not set") } @@ -162,9 +165,9 @@ func (j *MySQLRestoreJob) binariesInstalled() error { return nil } -func prepareRestoreCommands( +func prepareRestoreCommands( //nolint:nonamedreturns ctx context.Context, - backupName string, + folder string, config *BackupLocationConfig, targetDirectory string, stderr io.Writer, @@ -181,7 +184,7 @@ func prepareRestoreCommands( "--s3-bucket="+config.S3Config.BucketName, "--s3-region="+config.S3Config.BucketRegion, "--parallel=10", - backupName) + folder) xbcloudCmd.Stderr = stderr xbcloudStdout, err := xbcloudCmd.StdoutPipe() @@ -208,9 +211,14 @@ func (j *MySQLRestoreJob) restoreMySQLFromS3(ctx context.Context, targetDirector defer cancel() var stderr, stdout bytes.Buffer + + artifactFolder := path.Join(j.folder, j.name) + + j.l.Debugf("Artifact folder is: %s", artifactFolder) + xbcloudCmd, xbstreamCmd, err := prepareRestoreCommands( pipeCtx, - j.name, + artifactFolder, &j.locationConfig, targetDirectory, &stderr, diff --git a/agent/runner/jobs/pbm_helpers.go b/agent/runner/jobs/pbm_helpers.go index ba9a239430..f78604aeba 100644 --- a/agent/runner/jobs/pbm_helpers.go +++ b/agent/runner/jobs/pbm_helpers.go @@ -25,6 +25,7 @@ import ( "strings" "time" + 
"github.com/AlekSi/pointer" "github.com/pkg/errors" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" @@ -224,9 +225,9 @@ func waitForPBMNoRunningOperations(ctx context.Context, l logrus.FieldLogger, db for { select { case <-ticker.C: - var status pbmStatus - if err := execPBMCommand(ctx, dbURL, &status, "status"); err != nil { - return errors.Wrapf(err, "pbm status error") + status, err := getPBMStatus(ctx, dbURL) + if err != nil { + return err } if status.Running.Type == "" { return nil @@ -237,6 +238,27 @@ func waitForPBMNoRunningOperations(ctx context.Context, l logrus.FieldLogger, db } } +func isShardedCluster(ctx context.Context, dbURL *url.URL) (bool, error) { + status, err := getPBMStatus(ctx, dbURL) + if err != nil { + return false, err + } + + if len(status.Cluster) > 1 { + return true, nil + } + + return false, nil +} + +func getPBMStatus(ctx context.Context, dbURL *url.URL) (*pbmStatus, error) { + var status pbmStatus + if err := execPBMCommand(ctx, dbURL, &status, "status"); err != nil { + return nil, errors.Wrap(err, "pbm status error") + } + return &status, nil +} + func waitForPBMBackup(ctx context.Context, l logrus.FieldLogger, dbURL *url.URL, name string) error { l.Infof("waiting for pbm backup: %s", name) ticker := time.NewTicker(statusCheckInterval) @@ -533,3 +555,23 @@ func groupPartlyDoneErrors(info describeInfo) error { } return errors.New(strings.Join(errMsgs, "; ")) } + +// pbmGetSnapshotTimestamp returns time the backup restores target db to. 
+func pbmGetSnapshotTimestamp(ctx context.Context, dbURL *url.URL, backupName string) (*time.Time, error) { + var list pbmList + if err := execPBMCommand(ctx, dbURL, &list, "list"); err != nil { + return nil, err + } + + if len(list.Snapshots) == 0 { + return nil, errors.Wrapf(ErrNotFound, "got no one snapshot") + } + + for _, snapshot := range list.Snapshots { + if snapshot.Name == backupName { + return pointer.ToTime(time.Unix(snapshot.RestoreTo, 0)), nil + } + } + + return nil, errors.Wrap(ErrNotFound, "couldn't find required snapshot") +} diff --git a/agent/tailog/store.go b/agent/tailog/store.go index 5846150f42..f3ee18f96c 100644 --- a/agent/tailog/store.go +++ b/agent/tailog/store.go @@ -90,7 +90,7 @@ func (l *Store) GetLogs() ([]string, uint) { replacer := getColorReplacer() l.log.Do(func(p interface{}) { if p != nil { - logs = append(logs, replacer.Replace(p.(string))) + logs = append(logs, replacer.Replace(p.(string))) //nolint:forcetypeassert } }) diff --git a/agent/utils/mongo_fix/mongo_fix.go b/agent/utils/mongo_fix/mongo_fix.go index e6892fdf40..8811fb450f 100644 --- a/agent/utils/mongo_fix/mongo_fix.go +++ b/agent/utils/mongo_fix/mongo_fix.go @@ -18,11 +18,12 @@ package mongo_fix import ( "net/url" + "github.com/AlekSi/pointer" "go.mongodb.org/mongo-driver/mongo/options" ) // ClientOptionsForDSN applies URI to Client. 
-func ClientOptionsForDSN(dsn string) (*options.ClientOptions, error) { //nolint:unparam +func ClientOptionsForDSN(dsn string) (*options.ClientOptions, error) { clientOptions := options.Client().ApplyURI(dsn) if e := clientOptions.Validate(); e != nil { return nil, e @@ -40,6 +41,9 @@ func ClientOptionsForDSN(dsn string) (*options.ClientOptions, error) { //nolint: if username != "" || password != "" { clientOptions.Auth.Username = username clientOptions.Auth.Password = password + + // set this flag to connect to arbiter when there authentication is enabled + clientOptions.AuthenticateToAnything = pointer.ToBool(true) //nolint:staticcheck } return clientOptions, nil diff --git a/agent/versioner/versioner.go b/agent/versioner/versioner.go index 39c039875d..ec30e4e91a 100644 --- a/agent/versioner/versioner.go +++ b/agent/versioner/versioner.go @@ -94,7 +94,7 @@ func (v *Versioner) binaryVersion( defer cancel() if _, err := v.ef.LookPath(binaryName); err != nil { - if errors.Is(err.(*exec.Error).Err, exec.ErrNotFound) { + if errors.Is(err.(*exec.Error).Err, exec.ErrNotFound) { //nolint:forcetypeassert,errorlint return "", ErrNotFound } @@ -103,7 +103,7 @@ func (v *Versioner) binaryVersion( versionBytes, err := v.ef.CommandContext(ctx, binaryName, arg...).CombinedOutput() if err != nil { - if exitError, ok := err.(*exec.ExitError); ok { + if exitError, ok := err.(*exec.ExitError); ok { //nolint:errorlint if exitError.ExitCode() != expectedExitCode { return "", errors.WithStack(err) } diff --git a/api-tests/helpers.go b/api-tests/helpers.go index 43c16c5864..ea226fb76c 100644 --- a/api-tests/helpers.go +++ b/api-tests/helpers.go @@ -60,7 +60,7 @@ func AssertAPIErrorf(t TestingT, actual error, httpStatus int, grpcCode codes.Co require.Implementsf(t, (*ErrorResponse)(nil), actual, "Wrong response type. 
Expected %T, got %T.\nError message: %v", (*ErrorResponse)(nil), actual, actual) - assert.Equal(t, httpStatus, actual.(ErrorResponse).Code()) + assert.Equal(t, httpStatus, actual.(ErrorResponse).Code()) //nolint:forcetypeassert,errorlint // Have to use reflect because there are a lot of types with the same structure and different names. payload := reflect.ValueOf(actual).Elem().FieldByName("Payload") diff --git a/api-tests/init.go b/api-tests/init.go index 9970f9d6ec..a750ed683f 100644 --- a/api-tests/init.go +++ b/api-tests/init.go @@ -113,7 +113,7 @@ func Transport(baseURL *url.URL, insecureTLS bool) *httptransport.Runtime { } // disable HTTP/2, set TLS config - httpTransport := transport.Transport.(*http.Transport) + httpTransport := transport.Transport.(*http.Transport) //nolint:forcetypeassert httpTransport.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) if baseURL.Scheme == "https" { httpTransport.TLSClientConfig = tlsconfig.Get() @@ -184,7 +184,7 @@ func init() { go func() { s := <-signals signal.Stop(signals) - logrus.Warnf("Got %s, shutting down...", unix.SignalName(s.(syscall.Signal))) + logrus.Warnf("Got %s, shutting down...", unix.SignalName(s.(syscall.Signal))) //nolint:forcetypeassert cancel() }() diff --git a/api-tests/management/backup/backups_test.go b/api-tests/management/backup/backups_test.go index bd4dda1f85..95eeee882a 100644 --- a/api-tests/management/backup/backups_test.go +++ b/api-tests/management/backup/backups_test.go @@ -110,6 +110,7 @@ func TestScheduleBackup(t *testing.T) { Mode: pointer.ToString(backups.ScheduleBackupBodyModeSNAPSHOT), Enabled: false, DataModel: pointer.ToString(backups.StartBackupBodyDataModelLOGICAL), + Folder: "backup_folder", }, Context: pmmapitests.Context, }) @@ -150,6 +151,7 @@ func TestScheduleBackup(t *testing.T) { assert.Equal(t, body.Name, backup.Name) assert.Equal(t, body.Description, backup.Description) assert.Equal(t, body.CronExpression, backup.CronExpression) + 
assert.Equal(t, "backup_folder", backup.Folder) _, err = client.RemoveScheduledBackup(&backups.RemoveScheduledBackupParams{ Body: backups.RemoveScheduledBackupBody{ @@ -312,7 +314,7 @@ func TestScheduleBackup(t *testing.T) { ServiceID: mongo1ID, LocationID: locationID, CronExpression: "0 1 1 1 1", - Name: t.Name(), + Name: "some_backup_name", Description: "testing", Mode: pointer.ToString(backups.ScheduleBackupBodyModePITR), Enabled: true, diff --git a/api-tests/management/dbaas/psmdb_cluster_test.go b/api-tests/management/dbaas/psmdb_cluster_test.go index 3ea6979de7..6a685ce5e5 100644 --- a/api-tests/management/dbaas/psmdb_cluster_test.go +++ b/api-tests/management/dbaas/psmdb_cluster_test.go @@ -228,7 +228,7 @@ func TestPSMDBClusterServer(t *testing.T) { } _, err := dbaasClient.Default.DBClusters.RestartDBCluster(&restartPSMDBClusterParamsParam) require.Error(t, err) - assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) + assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) //nolint:errorlint }) t.Run("DeleteUnknownPSMDBCluster", func(t *testing.T) { @@ -241,7 +241,7 @@ func TestPSMDBClusterServer(t *testing.T) { } _, err := dbaasClient.Default.DBClusters.DeleteDBCluster(&deletePSMDBClusterParamsParam) require.Error(t, err) - assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) + assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) //nolint:errorlint }) t.Run("SuspendResumeCluster", func(t *testing.T) { diff --git a/api-tests/management/dbaas/pxc_cluster_test.go b/api-tests/management/dbaas/pxc_cluster_test.go index d68c2a8d79..c9c8ab07d3 100644 --- a/api-tests/management/dbaas/pxc_cluster_test.go +++ b/api-tests/management/dbaas/pxc_cluster_test.go @@ -261,7 +261,7 @@ func TestPXCClusterServer(t *testing.T) { } _, err := dbaasClient.Default.DBClusters.RestartDBCluster(&restartPXCClusterParamsParam) require.Error(t, err) - assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) + assert.Equal(t, 500, 
err.(pmmapitests.ErrorResponse).Code()) //nolint:errorlint }) t.Run("DeleteUnknownPXCCluster", func(t *testing.T) { @@ -274,7 +274,7 @@ func TestPXCClusterServer(t *testing.T) { } _, err := dbaasClient.Default.DBClusters.DeleteDBCluster(&deletePXCClusterParamsParam) require.Error(t, err) - assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) + assert.Equal(t, 500, err.(pmmapitests.ErrorResponse).Code()) //nolint:errorlint }) t.Run("SuspendResumeCluster", func(t *testing.T) { diff --git a/api-tests/server/settings_test.go b/api-tests/server/settings_test.go index 1add864bd8..eca6865a29 100644 --- a/api-tests/server/settings_test.go +++ b/api-tests/server/settings_test.go @@ -518,7 +518,8 @@ func TestSettings(t *testing.T) { res, err := serverClient.Default.Server.ChangeSettings(&server.ChangeSettingsParams{ Body: server.ChangeSettingsBody{ - AWSPartitions: []string{"aws", "aws", "aws", "aws", "aws", "aws"}, + // We're expecting that 10 elements will be more than number of default partitions, which currently equals 6. 
+ AWSPartitions: []string{"aws", "aws", "aws", "aws", "aws", "aws", "aws", "aws", "aws", "aws"}, }, Context: pmmapitests.Context, }) diff --git a/api-tests/server/updates_test.go b/api-tests/server/updates_test.go index 9ff735b678..560408eb24 100644 --- a/api-tests/server/updates_test.go +++ b/api-tests/server/updates_test.go @@ -174,7 +174,7 @@ func TestUpdate(t *testing.T) { }) if err != nil { // check that we know and understand all possible errors - switch err := err.(type) { + switch err := err.(type) { //nolint:errorlint case *url.Error: // *net.OpError, http.nothingWrittenError, or just io.EOF case *pmmapitests.NginxError: diff --git a/api/agentlocalpb/agentlocal.pb.validate.go b/api/agentlocalpb/agentlocal.pb.validate.go index 2189001b56..22ee2c771d 100644 --- a/api/agentlocalpb/agentlocal.pb.validate.go +++ b/api/agentlocalpb/agentlocal.pb.validate.go @@ -36,7 +36,7 @@ var ( _ = anypb.Any{} _ = sort.Sort - _ = inventorypb.AgentStatus(0) + _ = inventorypb.AgentType(0) ) // Validate checks the field values on ServerInfo with the rules defined in the diff --git a/api/agentpb/agent.pb.go b/api/agentpb/agent.pb.go index 2a5ac5f187..ba9e07886d 100644 --- a/api/agentpb/agent.pb.go +++ b/api/agentpb/agent.pb.go @@ -5125,6 +5125,8 @@ type StartJobRequest_MySQLBackup struct { // // *StartJobRequest_MySQLBackup_S3Config LocationConfig isStartJobRequest_MySQLBackup_LocationConfig `protobuf_oneof:"location_config"` + // Folder to store artifact on a storage. 
+ Folder string `protobuf:"bytes,12,opt,name=folder,proto3" json:"folder,omitempty"` } func (x *StartJobRequest_MySQLBackup) Reset() { @@ -5215,6 +5217,13 @@ func (x *StartJobRequest_MySQLBackup) GetS3Config() *S3LocationConfig { return nil } +func (x *StartJobRequest_MySQLBackup) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + type isStartJobRequest_MySQLBackup_LocationConfig interface { isStartJobRequest_MySQLBackup_LocationConfig() } @@ -5241,6 +5250,8 @@ type StartJobRequest_MySQLRestoreBackup struct { // // *StartJobRequest_MySQLRestoreBackup_S3Config LocationConfig isStartJobRequest_MySQLRestoreBackup_LocationConfig `protobuf_oneof:"location_config"` + // Folder to store artifact on a storage. + Folder string `protobuf:"bytes,12,opt,name=folder,proto3" json:"folder,omitempty"` } func (x *StartJobRequest_MySQLRestoreBackup) Reset() { @@ -5303,6 +5314,13 @@ func (x *StartJobRequest_MySQLRestoreBackup) GetS3Config() *S3LocationConfig { return nil } +func (x *StartJobRequest_MySQLRestoreBackup) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + type isStartJobRequest_MySQLRestoreBackup_LocationConfig interface { isStartJobRequest_MySQLRestoreBackup_LocationConfig() } @@ -5343,6 +5361,8 @@ type StartJobRequest_MongoDBBackup struct { // *StartJobRequest_MongoDBBackup_S3Config // *StartJobRequest_MongoDBBackup_FilesystemConfig LocationConfig isStartJobRequest_MongoDBBackup_LocationConfig `protobuf_oneof:"location_config"` + // Folder to store artifact on a storage. 
+ Folder string `protobuf:"bytes,12,opt,name=folder,proto3" json:"folder,omitempty"` } func (x *StartJobRequest_MongoDBBackup) Reset() { @@ -5454,6 +5474,13 @@ func (x *StartJobRequest_MongoDBBackup) GetFilesystemConfig() *FilesystemLocatio return nil } +func (x *StartJobRequest_MongoDBBackup) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + type isStartJobRequest_MongoDBBackup_LocationConfig interface { isStartJobRequest_MongoDBBackup_LocationConfig() } @@ -5498,6 +5525,10 @@ type StartJobRequest_MongoDBRestoreBackup struct { // *StartJobRequest_MongoDBRestoreBackup_S3Config // *StartJobRequest_MongoDBRestoreBackup_FilesystemConfig LocationConfig isStartJobRequest_MongoDBRestoreBackup_LocationConfig `protobuf_oneof:"location_config"` + // Folder to store artifact on a storage. + Folder string `protobuf:"bytes,12,opt,name=folder,proto3" json:"folder,omitempty"` + // Extra data for backup tool. + PbmMetadata *backup.PbmMetadata `protobuf:"bytes,13,opt,name=pbm_metadata,json=pbmMetadata,proto3" json:"pbm_metadata,omitempty"` } func (x *StartJobRequest_MongoDBRestoreBackup) Reset() { @@ -5602,6 +5633,20 @@ func (x *StartJobRequest_MongoDBRestoreBackup) GetFilesystemConfig() *Filesystem return nil } +func (x *StartJobRequest_MongoDBRestoreBackup) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + +func (x *StartJobRequest_MongoDBRestoreBackup) GetPbmMetadata() *backup.PbmMetadata { + if x != nil { + return x.PbmMetadata + } + return nil +} + type isStartJobRequest_MongoDBRestoreBackup_LocationConfig interface { isStartJobRequest_MongoDBRestoreBackup_LocationConfig() } @@ -5673,6 +5718,10 @@ type JobResult_MongoDBBackup struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + IsShardedCluster bool `protobuf:"varint,1,opt,name=is_sharded_cluster,json=isShardedCluster,proto3" json:"is_sharded_cluster,omitempty"` + // Contains additional data describing artifact. 
+ Metadata *backup.Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` } func (x *JobResult_MongoDBBackup) Reset() { @@ -5707,11 +5756,28 @@ func (*JobResult_MongoDBBackup) Descriptor() ([]byte, []int) { return file_agentpb_agent_proto_rawDescGZIP(), []int{34, 1} } +func (x *JobResult_MongoDBBackup) GetIsShardedCluster() bool { + if x != nil { + return x.IsShardedCluster + } + return false +} + +func (x *JobResult_MongoDBBackup) GetMetadata() *backup.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + // MySQLBackup contains result for MySQL backup job. type JobResult_MySQLBackup struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Contains additional data describing artifact. + Metadata *backup.Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` } func (x *JobResult_MySQLBackup) Reset() { @@ -5746,6 +5812,13 @@ func (*JobResult_MySQLBackup) Descriptor() ([]byte, []int) { return file_agentpb_agent_proto_rawDescGZIP(), []int{34, 2} } +func (x *JobResult_MySQLBackup) GetMetadata() *backup.Metadata { + if x != nil { + return x.Metadata + } + return nil +} + // MySQLRestoreBackup contains result for MySQL restore backup job. 
type JobResult_MySQLRestoreBackup struct { state protoimpl.MessageState @@ -6995,7 +7068,7 @@ var file_agentpb_agent_proto_rawDesc = []byte{ 0x6b, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x18, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xc8, 0x0c, 0x0a, 0x0f, 0x53, 0x74, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x95, 0x0e, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, @@ -7023,7 +7096,7 @@ var file_agentpb_agent_proto_rawDesc = []byte{ 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x14, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0xe2, 0x01, 0x0a, 0x0b, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x93, 0x02, 0x0a, 0x0b, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, @@ -7036,333 +7109,354 @@ var file_agentpb_agent_proto_rawDesc = []byte{ 0x36, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 
0x33, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x08, 0x73, - 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x11, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x92, 0x01, 0x0a, 0x12, 0x4d, - 0x79, 0x53, 0x51, 0x4c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, - 0x53, 0x33, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x48, 0x00, 0x52, 0x08, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x11, 0x0a, 0x0f, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x8a, 0x03, 0x0a, 0x0d, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 
0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x69, 0x74, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x69, 0x74, 0x72, 0x12, 0x33, 0x0a, 0x0a, - 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x14, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, - 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x33, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x08, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x11, 0x66, 0x69, 0x6c, - 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, - 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x11, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xfe, 0x02, 0x0a, - 0x14, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x41, 0x0a, 0x0e, 0x70, 0x69, 0x74, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x70, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x33, - 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x08, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x11, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, - 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x11, 0x0a, 0x0f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 
0x05, 0x0a, - 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x28, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, 0x62, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x27, - 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x4a, - 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xcb, 0x04, 0x0a, 0x09, 0x4a, - 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0c, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, - 0x0b, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x57, 0x0a, 0x14, - 0x6d, 0x79, 0x73, 0x71, 
0x6c, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4d, 0x79, 0x53, - 0x51, 0x4c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, - 0x00, 0x52, 0x12, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x47, 0x0a, 0x0e, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, - 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, - 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, - 0x0d, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x5d, - 0x0a, 0x16, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x2e, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x14, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x21, 0x0a, - 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x1a, 0x0f, 0x0a, 0x0d, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x1a, 0x0d, 0x0a, 0x0b, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x1a, 0x14, 0x0a, 0x12, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x52, 
0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x16, 0x0a, 0x14, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, - 0x42, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x08, - 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa7, 0x03, 0x0a, 0x0b, 0x4a, 0x6f, 0x62, - 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x43, 0x0a, 0x0c, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x67, 0x72, - 0x65, 0x73, 0x73, 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, - 0x00, 0x52, 0x0b, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x59, - 0x0a, 0x14, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, + 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x42, + 0x11, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, + 
0x4d, 0x79, 0x53, 0x51, 0x4c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x33, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x08, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x42, 0x11, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x11, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0xa2, 0x03, 0x0a, 0x0d, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x6f, 0x63, 
0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x69, 0x74, 0x72, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x69, 0x74, 0x72, 0x12, 0x33, + 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x6f, + 0x64, 0x65, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x33, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x08, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x11, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x42, 0x11, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd1, 0x03, 0x0a, 0x14, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, + 0x44, 0x42, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x04, 0x75, + 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0e, 0x70, 0x69, 0x74, + 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x70, + 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x09, + 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x33, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x08, 0x73, 0x33, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 
0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x0c, + 0x70, 0x62, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x62, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x70, 0x62, 0x6d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x11, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x05, 0x0a, 0x03, 0x6a, 0x6f, + 0x62, 0x22, 0x28, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x27, 0x0a, 0x0e, 0x53, + 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, + 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, + 0x6f, 0x62, 0x49, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdb, 0x05, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 
0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, + 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0c, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4d, + 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x57, 0x0a, 0x14, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, + 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, + 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x47, 0x0a, 0x0e, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x5f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4d, 0x6f, 0x6e, + 0x67, 0x6f, 0x44, 0x42, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x6d, 0x6f, + 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x5d, 0x0a, 0x16, 0x6d, + 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4d, 0x6f, + 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 
0x63, 0x6b, + 0x75, 0x70, 0x48, 0x00, 0x52, 0x14, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x21, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x6e, 0x0a, + 0x0d, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2c, + 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3e, 0x0a, + 0x0b, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2f, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x14, 0x0a, + 0x12, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x1a, 0x16, 0x0a, 0x14, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x08, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa7, 0x03, 0x0a, 0x0b, 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x43, 0x0a, 0x0c, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, - 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x6f, 0x67, - 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, - 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x4c, 0x6f, 0x67, 0x73, - 0x48, 0x00, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x1a, 0x0d, 0x0a, 0x0b, 0x4d, 0x79, 0x53, 0x51, - 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x14, 0x0a, 0x12, 0x4d, 0x79, 0x53, 0x51, 0x4c, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x49, 0x0a, - 0x04, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x22, 0x9d, 0x04, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 
0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x09, 0x73, 0x6f, 0x66, - 0x74, 0x77, 0x61, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, - 0x52, 0x09, 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x73, 0x1a, 0x08, 0x0a, 0x06, 0x4d, - 0x79, 0x53, 0x51, 0x4c, 0x64, 0x1a, 0x0c, 0x0a, 0x0a, 0x58, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x1a, 0x09, 0x0a, 0x07, 0x58, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x1a, 0x08, - 0x0a, 0x06, 0x51, 0x70, 0x72, 0x65, 0x73, 0x73, 0x1a, 0x09, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x67, - 0x6f, 0x44, 0x42, 0x1a, 0x05, 0x0a, 0x03, 0x50, 0x42, 0x4d, 0x1a, 0x85, 0x03, 0x0a, 0x08, 0x53, - 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x12, 0x3a, 0x0a, 0x06, 0x6d, 0x79, 0x73, 0x71, 0x6c, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, - 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x64, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, + 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0b, + 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x59, 0x0a, 0x14, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x4d, 0x79, + 0x53, 
0x51, 0x4c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x48, 0x00, 0x52, 0x12, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x14, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, + 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x48, 0x00, 0x52, + 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x1a, 0x0d, 0x0a, 0x0b, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x14, 0x0a, 0x12, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x1a, 0x49, 0x0a, 0x04, 0x4c, 0x6f, + 0x67, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, + 0x9d, 0x04, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x09, 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, + 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x52, 0x09, 0x73, + 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x73, 0x1a, 0x08, 0x0a, 0x06, 0x4d, 0x79, 0x53, 0x51, + 0x4c, 0x64, 0x1a, 0x0c, 0x0a, 0x0a, 0x58, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x1a, 0x09, 0x0a, 0x07, 0x58, 0x62, 0x63, 
0x6c, 0x6f, 0x75, 0x64, 0x1a, 0x08, 0x0a, 0x06, 0x51, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x1a, 0x09, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, + 0x1a, 0x05, 0x0a, 0x03, 0x50, 0x42, 0x4d, 0x1a, 0x85, 0x03, 0x0a, 0x08, 0x53, 0x6f, 0x66, 0x74, + 0x77, 0x61, 0x72, 0x65, 0x12, 0x3a, 0x0a, 0x06, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x64, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, + 0x12, 0x46, 0x0a, 0x0a, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x58, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0a, 0x78, 0x74, + 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3d, 0x0a, 0x07, 0x78, 0x62, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x58, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x48, 0x00, 0x52, 0x07, + 0x78, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x3a, 0x0a, 0x06, 0x71, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x58, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52, - 0x0a, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x3d, 0x0a, 0x07, 0x78, - 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 
0x2e, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x58, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x48, - 0x00, 0x52, 0x07, 0x78, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x3a, 0x0a, 0x06, 0x71, 0x70, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x51, 0x70, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x06, - 0x71, 0x70, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, - 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x6f, 0x6e, - 0x67, 0x6f, 0x64, 0x12, 0x31, 0x0a, 0x03, 0x70, 0x62, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x42, 0x4d, 0x48, - 0x00, 0x52, 0x03, 0x70, 0x62, 0x6d, 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, - 0x72, 0x65, 0x22, 0x90, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x07, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xbb, 0x08, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0xff, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, 0x00, - 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x71, 0x61, 0x6e, - 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x41, 0x4e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x71, 0x61, 0x6e, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x48, - 0x00, 0x52, 0x09, 0x6a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x37, 0x0a, 0x0c, - 0x6a, 0x6f, 0x62, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x50, 0x72, - 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6a, 0x6f, 0x62, 0x50, 0x72, 0x6f, - 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6e, 0x67, - 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x65, 0x74, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, - 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3c, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 
0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x4b, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x09, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, 0x62, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x33, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6a, 0x6f, 0x62, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, - 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x12, 0x39, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6a, 0x6f, 0x62, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x70, 0x62, 0x6d, 0x5f, 0x73, 0x77, 0x69, - 0x74, 0x63, 0x68, 0x5f, 0x70, 0x69, 0x74, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x42, 0x4d, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, - 0x50, 0x49, 0x54, 0x52, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, - 0x70, 0x62, 0x6d, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x50, 0x69, 0x74, 0x72, 0x12, 0x39, 0x0a, - 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4c, - 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x09, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x22, 0xc9, 0x07, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0xff, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x73, 0x74, 0x2e, 0x51, 0x70, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x06, 0x71, 0x70, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, + 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x6f, 
0x6e, 0x67, 0x6f, 0x64, + 0x12, 0x31, 0x0a, 0x03, 0x70, 0x62, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x42, 0x4d, 0x48, 0x00, 0x52, 0x03, + 0x70, 0x62, 0x6d, 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x6f, 0x66, 0x74, 0x77, 0x61, 0x72, 0x65, 0x22, + 0x90, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0xbb, 0x08, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0xff, 0x0f, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x21, 0x0a, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, + 0x69, 0x6e, 
0x67, 0x12, 0x41, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x71, 0x61, 0x6e, 0x5f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x41, 0x4e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x71, 0x61, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x48, 0x00, 0x52, 0x09, + 0x6a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x37, 0x0a, 0x0c, 0x6a, 0x6f, 0x62, + 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6a, 0x6f, 0x62, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 
0x50, 0x6f, 0x6e, 0x67, 0x48, 0x00, 0x52, - 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x42, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x0b, 0x71, 0x61, 0x6e, - 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x41, 0x4e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x71, 0x61, 0x6e, - 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x70, - 0x69, 0x6e, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x67, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x35, - 0x0a, 0x09, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 
0x67, - 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, - 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, - 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6a, 0x6f, - 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, - 0x53, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, - 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x12, 0x38, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x48, 0x00, 0x52, 0x09, 0x6a, 0x6f, 0x62, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x3e, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x67, 0x65, 0x6e, - 0x74, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0f, 0x70, 0x62, 0x6d, 0x5f, 0x73, 0x77, 0x69, 0x74, 0x63, - 0x68, 0x5f, 0x70, 0x69, 0x74, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x42, 0x4d, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x50, 0x49, - 0x54, 0x52, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x62, 0x6d, - 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x50, 0x69, 0x74, 0x72, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x67, - 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x08, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3f, 0x0a, + 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, + 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 
0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x10, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, + 0x62, 0x12, 0x33, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x07, 0x73, + 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x12, 0x39, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, + 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 
0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x70, 0x62, 0x6d, 0x5f, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, + 0x5f, 0x70, 0x69, 0x74, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x42, 0x4d, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x50, 0x49, 0x54, + 0x52, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x62, 0x6d, + 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x50, 0x69, 0x74, 0x72, 0x12, 0x39, 0x0a, 0x0a, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x4c, 0x6f, 0x67, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2a, - 0xc4, 0x01, 0x0a, 0x18, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x23, - 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, 0x58, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x55, - 0x54, 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x49, 0x4e, 0x56, 0x41, - 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, - 0x58, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x01, 0x12, 0x24, - 0x0a, 0x20, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, 0x58, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x5f, - 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x4a, 0x53, - 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x30, 0x0a, 0x2c, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, 0x58, - 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x55, 
0x54, 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x5f, - 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x03, 0x32, 0x41, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, - 0x38, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x13, 0x2e, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x14, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6f, 0x0a, 0x09, 0x63, 0x6f, 0x6d, - 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x41, 0x58, 0x58, 0xaa, 0x02, - 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0xca, 0x02, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0xe2, 0x02, - 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0xea, 0x02, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x09, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x22, 0xc9, 0x07, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0xff, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x12, + 0x21, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, + 0x6e, 0x67, 0x12, 0x42, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x0b, 0x71, 0x61, 0x6e, 0x5f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x41, 0x4e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x71, 0x61, 0x6e, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x69, 0x6e, 0x67, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x35, 0x0a, 0x09, 0x73, + 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 
0x12, 0x3e, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, + 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x4a, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4a, + 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x73, + 0x74, 0x6f, 0x70, 0x4a, 0x6f, 0x62, 0x12, 0x38, 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x3e, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x45, 0x0a, 0x0f, 0x70, 0x62, 0x6d, 0x5f, 0x73, 0x77, 0x69, 0x74, 0x63, 0x68, 0x5f, 0x70, + 0x69, 0x74, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x2e, 0x50, 0x42, 0x4d, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x50, 0x49, 0x54, 0x52, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x62, 0x6d, 0x53, 0x77, 0x69, + 0x74, 0x63, 0x68, 0x50, 0x69, 0x74, 0x72, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, + 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2a, 0xc4, 0x01, 0x0a, + 0x18, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x59, 0x53, + 0x51, 0x4c, 0x5f, 0x45, 0x58, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x50, 0x55, + 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x10, 0x00, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, 0x58, 
0x50, 0x4c, + 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, + 0x54, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x01, 0x12, 0x24, 0x0a, 0x20, 0x4d, + 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, 0x58, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x5f, 0x4f, 0x55, 0x54, + 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, + 0x02, 0x12, 0x30, 0x0a, 0x2c, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x5f, 0x45, 0x58, 0x50, 0x4c, 0x41, + 0x49, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, + 0x5f, 0x54, 0x52, 0x41, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x5f, 0x4a, 0x53, 0x4f, + 0x4e, 0x10, 0x03, 0x32, 0x41, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x07, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x13, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x14, 0x2e, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6f, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, + 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x41, 0x58, 0x58, 0xaa, 0x02, 0x05, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0xca, 0x02, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0xe2, 0x02, 0x11, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -7480,6 +7574,8 @@ var ( (*status.Status)(nil), // 96: google.rpc.Status (inventorypb.AgentType)(0), // 97: 
inventory.AgentType (backup.DataModel)(0), // 98: backup.v1.DataModel + (*backup.PbmMetadata)(nil), // 99: backup.v1.PbmMetadata + (*backup.Metadata)(nil), // 100: backup.v1.Metadata } ) @@ -7609,19 +7705,22 @@ var file_agentpb_agent_proto_depIdxs = []int32{ 91, // 122: agent.StartJobRequest.MongoDBRestoreBackup.pitr_timestamp:type_name -> google.protobuf.Timestamp 30, // 123: agent.StartJobRequest.MongoDBRestoreBackup.s3_config:type_name -> agent.S3LocationConfig 31, // 124: agent.StartJobRequest.MongoDBRestoreBackup.filesystem_config:type_name -> agent.FilesystemLocationConfig - 83, // 125: agent.GetVersionsRequest.Software.mysqld:type_name -> agent.GetVersionsRequest.MySQLd - 84, // 126: agent.GetVersionsRequest.Software.xtrabackup:type_name -> agent.GetVersionsRequest.Xtrabackup - 85, // 127: agent.GetVersionsRequest.Software.xbcloud:type_name -> agent.GetVersionsRequest.Xbcloud - 86, // 128: agent.GetVersionsRequest.Software.qpress:type_name -> agent.GetVersionsRequest.Qpress - 87, // 129: agent.GetVersionsRequest.Software.mongod:type_name -> agent.GetVersionsRequest.MongoDB - 88, // 130: agent.GetVersionsRequest.Software.pbm:type_name -> agent.GetVersionsRequest.PBM - 40, // 131: agent.Agent.Connect:input_type -> agent.AgentMessage - 41, // 132: agent.Agent.Connect:output_type -> agent.ServerMessage - 132, // [132:133] is the sub-list for method output_type - 131, // [131:132] is the sub-list for method input_type - 131, // [131:131] is the sub-list for extension type_name - 131, // [131:131] is the sub-list for extension extendee - 0, // [0:131] is the sub-list for field type_name + 99, // 125: agent.StartJobRequest.MongoDBRestoreBackup.pbm_metadata:type_name -> backup.v1.PbmMetadata + 100, // 126: agent.JobResult.MongoDBBackup.metadata:type_name -> backup.v1.Metadata + 100, // 127: agent.JobResult.MySQLBackup.metadata:type_name -> backup.v1.Metadata + 83, // 128: agent.GetVersionsRequest.Software.mysqld:type_name -> agent.GetVersionsRequest.MySQLd + 84, // 
129: agent.GetVersionsRequest.Software.xtrabackup:type_name -> agent.GetVersionsRequest.Xtrabackup + 85, // 130: agent.GetVersionsRequest.Software.xbcloud:type_name -> agent.GetVersionsRequest.Xbcloud + 86, // 131: agent.GetVersionsRequest.Software.qpress:type_name -> agent.GetVersionsRequest.Qpress + 87, // 132: agent.GetVersionsRequest.Software.mongod:type_name -> agent.GetVersionsRequest.MongoDB + 88, // 133: agent.GetVersionsRequest.Software.pbm:type_name -> agent.GetVersionsRequest.PBM + 40, // 134: agent.Agent.Connect:input_type -> agent.AgentMessage + 41, // 135: agent.Agent.Connect:output_type -> agent.ServerMessage + 135, // [135:136] is the sub-list for method output_type + 134, // [134:135] is the sub-list for method input_type + 134, // [134:134] is the sub-list for extension type_name + 134, // [134:134] is the sub-list for extension extendee + 0, // [0:134] is the sub-list for field type_name } func init() { file_agentpb_agent_proto_init() } diff --git a/api/agentpb/agent.pb.validate.go b/api/agentpb/agent.pb.validate.go index bae8ea5f39..6a25001eb9 100644 --- a/api/agentpb/agent.pb.validate.go +++ b/api/agentpb/agent.pb.validate.go @@ -39,7 +39,7 @@ var ( _ = backupv1.DataModel(0) - _ = inventorypb.AgentType(0) + _ = inventorypb.AgentStatus(0) ) // Validate checks the field values on TextFiles with the rules defined in the @@ -11004,6 +11004,8 @@ func (m *StartJobRequest_MySQLBackup) validate(all bool) error { // no validation rules for Name + // no validation rules for Folder + switch v := m.LocationConfig.(type) { case *StartJobRequest_MySQLBackup_S3Config: if v == nil { @@ -11158,6 +11160,8 @@ func (m *StartJobRequest_MySQLRestoreBackup) validate(all bool) error { // no validation rules for Name + // no validation rules for Folder + switch v := m.LocationConfig.(type) { case *StartJobRequest_MySQLRestoreBackup_S3Config: if v == nil { @@ -11324,6 +11328,8 @@ func (m *StartJobRequest_MongoDBBackup) validate(all bool) error { // no validation rules 
for DataModel + // no validation rules for Folder + switch v := m.LocationConfig.(type) { case *StartJobRequest_MongoDBBackup_S3Config: if v == nil { @@ -11556,6 +11562,37 @@ func (m *StartJobRequest_MongoDBRestoreBackup) validate(all bool) error { } } + // no validation rules for Folder + + if all { + switch v := interface{}(m.GetPbmMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StartJobRequest_MongoDBRestoreBackupValidationError{ + field: "PbmMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StartJobRequest_MongoDBRestoreBackupValidationError{ + field: "PbmMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPbmMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StartJobRequest_MongoDBRestoreBackupValidationError{ + field: "PbmMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + switch v := m.LocationConfig.(type) { case *StartJobRequest_MongoDBRestoreBackup_S3Config: if v == nil { @@ -11849,6 +11886,37 @@ func (m *JobResult_MongoDBBackup) validate(all bool) error { var errors []error + // no validation rules for IsShardedCluster + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, JobResult_MongoDBBackupValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, JobResult_MongoDBBackupValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return JobResult_MongoDBBackupValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return JobResult_MongoDBBackupMultiError(errors) } @@ -11951,6 +12019,35 @@ func (m *JobResult_MySQLBackup) validate(all bool) error { var errors []error + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, JobResult_MySQLBackupValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, JobResult_MySQLBackupValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return JobResult_MySQLBackupValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return JobResult_MySQLBackupMultiError(errors) } diff --git a/api/agentpb/agent.proto b/api/agentpb/agent.proto index 7fa494b48f..ffd0ec16fe 100644 --- a/api/agentpb/agent.proto +++ b/api/agentpb/agent.proto @@ -475,6 +475,10 @@ message FilesystemLocationConfig { message StartJobRequest { // MySQLBackup is job for backup MySQL service. message MySQLBackup { + // Reserved to implement mysql backup storing in local filesystem. + reserved 11; + reserved "filesystem_config"; + // Database user; string user = 1; // Database password. @@ -491,9 +495,15 @@ message StartJobRequest { oneof location_config { S3LocationConfig s3_config = 10; } + // Folder to store artifact on a storage. 
+ string folder = 12; } // MySQLRestoreBackup is job for MySQL restore backup service. message MySQLRestoreBackup { + // Reserved to implement mysql backup storing in local filesystem. + reserved 11; + reserved "filesystem_config"; + // Service identifier where the backup should be restored. string service_id = 1; // Backup name. @@ -502,6 +512,8 @@ message StartJobRequest { oneof location_config { S3LocationConfig s3_config = 10; } + // Folder to store artifact on a storage. + string folder = 12; } // MongoDBBackup is job for backup MongoDB service. message MongoDBBackup { @@ -526,6 +538,8 @@ message StartJobRequest { S3LocationConfig s3_config = 10; FilesystemLocationConfig filesystem_config = 11; } + // Folder to store artifact on a storage. + string folder = 12; } // MongoDBRestoreBackup is job for MongoDB restore backup service. message MongoDBRestoreBackup { @@ -548,6 +562,10 @@ message StartJobRequest { S3LocationConfig s3_config = 10; FilesystemLocationConfig filesystem_config = 11; } + // Folder to store artifact on a storage. + string folder = 12; + // Extra data for backup tool. + backup.v1.PbmMetadata pbm_metadata = 13; } string job_id = 1; @@ -581,10 +599,17 @@ message JobResult { string message = 1; } // MongoDBBackup contains result for MongoDB backup job. - message MongoDBBackup {} + message MongoDBBackup { + bool is_sharded_cluster = 1; + // Contains additional data describing artifact. + backup.v1.Metadata metadata = 2; + } // MySQLBackup contains result for MySQL backup job. - message MySQLBackup {} + message MySQLBackup { + // Contains additional data describing artifact. + backup.v1.Metadata metadata = 1; + } // MySQLRestoreBackup contains result for MySQL restore backup job. 
message MySQLRestoreBackup {} diff --git a/api/managementpb/agent/agent.pb.go b/api/managementpb/agent/agent.pb.go index 8a2b6ce763..bd9abdc01b 100644 --- a/api/managementpb/agent/agent.pb.go +++ b/api/managementpb/agent/agent.pb.go @@ -30,7 +30,7 @@ type UniversalAgent struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Unique randomly generated instance identifier. + // Unique agent identifier. AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` // True if the agent password is set. IsAgentPasswordSet bool `protobuf:"varint,2,opt,name=is_agent_password_set,json=isAgentPasswordSet,proto3" json:"is_agent_password_set,omitempty"` @@ -415,6 +415,8 @@ type ListAgentRequest struct { // Return only Agents that relate to a specific ServiceID. ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Return only Agents that relate to a specific NodeID. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` } func (x *ListAgentRequest) Reset() { @@ -456,6 +458,13 @@ func (x *ListAgentRequest) GetServiceId() string { return "" } +func (x *ListAgentRequest) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + type ListAgentResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1014,10 +1023,11 @@ var file_managementpb_agent_agent_proto_rawDesc = []byte{ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, - 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x73, - 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, + 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0x4a, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, diff --git a/api/managementpb/agent/agent.pb.validate.go b/api/managementpb/agent/agent.pb.validate.go index 61660f97c9..4edead30fc 100644 --- a/api/managementpb/agent/agent.pb.validate.go +++ b/api/managementpb/agent/agent.pb.validate.go @@ -393,16 +393,9 @@ func (m *ListAgentRequest) validate(all bool) error { var errors []error - if utf8.RuneCountInString(m.GetServiceId()) < 1 { - err := ListAgentRequestValidationError{ - field: "ServiceId", - reason: "value length must be at least 1 runes", - } - if !all { - return err - } - errors = append(errors, err) - } + // no validation rules for ServiceId + + // no validation rules for NodeId if len(errors) > 0 { return ListAgentRequestMultiError(errors) diff --git a/api/managementpb/agent/agent.proto b/api/managementpb/agent/agent.proto index 1ccd5ff48e..6a9ed9a487 100644 --- a/api/managementpb/agent/agent.proto +++ b/api/managementpb/agent/agent.proto @@ -60,7 +60,7 @@ message UniversalAgent { bool is_ssl_key_set = 3; } - // Unique randomly generated instance identifier. 
+ // Unique agent identifier. string agent_id = 1; // True if the agent password is set. bool is_agent_password_set = 2; @@ -141,8 +141,12 @@ message UniversalAgent { } message ListAgentRequest { + // Only one of the parameters below must be set. + // Return only Agents that relate to a specific ServiceID. - string service_id = 1 [(validate.rules).string.min_len = 1]; + string service_id = 1; + // Return only Agents that relate to a specific NodeID. + string node_id = 2; } message ListAgentResponse { diff --git a/api/managementpb/agent/json/agent.json b/api/managementpb/agent/json/agent.json index 8b3dbe4fb9..571bb85d47 100644 --- a/api/managementpb/agent/json/agent.json +++ b/api/managementpb/agent/json/agent.json @@ -25,12 +25,19 @@ "operationId": "ListAgents", "parameters": [ { + "description": "Only one of the parameters below must be set.", "name": "body", "in": "body", "required": true, "schema": { + "description": "Only one of the parameters below must be set.", "type": "object", "properties": { + "node_id": { + "description": "Return only Agents that relate to a specific NodeID.", + "type": "string", + "x-order": 1 + }, "service_id": { "description": "Return only Agents that relate to a specific ServiceID.", "type": "string", @@ -53,7 +60,7 @@ "type": "object", "properties": { "agent_id": { - "description": "Unique randomly generated instance identifier.", + "description": "Unique agent identifier.", "type": "string", "x-order": 0 }, diff --git a/api/managementpb/agent/json/client/agent/list_agents_parameters.go b/api/managementpb/agent/json/client/agent/list_agents_parameters.go index d79ab0fb57..5116dca42d 100644 --- a/api/managementpb/agent/json/client/agent/list_agents_parameters.go +++ b/api/managementpb/agent/json/client/agent/list_agents_parameters.go @@ -60,7 +60,10 @@ ListAgentsParams contains all the parameters to send to the API endpoint Typically these are written to a http.Request. */ type ListAgentsParams struct { - // Body. + /* Body. 
+ + Only one of the parameters below must be set. + */ Body ListAgentsBody timeout time.Duration diff --git a/api/managementpb/agent/json/client/agent/list_agents_responses.go b/api/managementpb/agent/json/client/agent/list_agents_responses.go index fc510d39cd..a0b2abc3d1 100644 --- a/api/managementpb/agent/json/client/agent/list_agents_responses.go +++ b/api/managementpb/agent/json/client/agent/list_agents_responses.go @@ -120,12 +120,15 @@ func (o *ListAgentsDefault) readResponse(response runtime.ClientResponse, consum } /* -ListAgentsBody list agents body +ListAgentsBody Only one of the parameters below must be set. swagger:model ListAgentsBody */ type ListAgentsBody struct { // Return only Agents that relate to a specific ServiceID. ServiceID string `json:"service_id,omitempty"` + + // Return only Agents that relate to a specific NodeID. + NodeID string `json:"node_id,omitempty"` } // Validate validates this list agents body @@ -400,7 +403,7 @@ ListAgentsOKBodyAgentsItems0 list agents OK body agents items0 swagger:model ListAgentsOKBodyAgentsItems0 */ type ListAgentsOKBodyAgentsItems0 struct { - // Unique randomly generated instance identifier. + // Unique agent identifier. AgentID string `json:"agent_id,omitempty"` // True if the agent password is set. 
diff --git a/api/managementpb/alerting/alerting.pb.validate.go b/api/managementpb/alerting/alerting.pb.validate.go index d146af0a0d..b3d3e3d9e3 100644 --- a/api/managementpb/alerting/alerting.pb.validate.go +++ b/api/managementpb/alerting/alerting.pb.validate.go @@ -36,7 +36,7 @@ var ( _ = anypb.Any{} _ = sort.Sort - _ = managementpb.Severity(0) + _ = managementpb.BooleanFlag(0) ) // Validate checks the field values on BoolParamDefinition with the rules diff --git a/api/managementpb/backup/artifacts.pb.go b/api/managementpb/backup/artifacts.pb.go index 54a4854e8c..8a1619eb59 100644 --- a/api/managementpb/backup/artifacts.pb.go +++ b/api/managementpb/backup/artifacts.pb.go @@ -27,14 +27,15 @@ const ( type BackupStatus int32 const ( - BackupStatus_BACKUP_STATUS_INVALID BackupStatus = 0 - BackupStatus_BACKUP_STATUS_PENDING BackupStatus = 1 - BackupStatus_BACKUP_STATUS_IN_PROGRESS BackupStatus = 2 - BackupStatus_BACKUP_STATUS_PAUSED BackupStatus = 3 - BackupStatus_BACKUP_STATUS_SUCCESS BackupStatus = 4 - BackupStatus_BACKUP_STATUS_ERROR BackupStatus = 5 - BackupStatus_BACKUP_STATUS_DELETING BackupStatus = 6 - BackupStatus_BACKUP_STATUS_FAILED_TO_DELETE BackupStatus = 7 + BackupStatus_BACKUP_STATUS_INVALID BackupStatus = 0 + BackupStatus_BACKUP_STATUS_PENDING BackupStatus = 1 + BackupStatus_BACKUP_STATUS_IN_PROGRESS BackupStatus = 2 + BackupStatus_BACKUP_STATUS_PAUSED BackupStatus = 3 + BackupStatus_BACKUP_STATUS_SUCCESS BackupStatus = 4 + BackupStatus_BACKUP_STATUS_ERROR BackupStatus = 5 + BackupStatus_BACKUP_STATUS_DELETING BackupStatus = 6 + BackupStatus_BACKUP_STATUS_FAILED_TO_DELETE BackupStatus = 7 + BackupStatus_BACKUP_STATUS_CLEANUP_IN_PROGRESS BackupStatus = 8 ) // Enum value maps for BackupStatus. 
@@ -48,16 +49,18 @@ var ( 5: "BACKUP_STATUS_ERROR", 6: "BACKUP_STATUS_DELETING", 7: "BACKUP_STATUS_FAILED_TO_DELETE", + 8: "BACKUP_STATUS_CLEANUP_IN_PROGRESS", } BackupStatus_value = map[string]int32{ - "BACKUP_STATUS_INVALID": 0, - "BACKUP_STATUS_PENDING": 1, - "BACKUP_STATUS_IN_PROGRESS": 2, - "BACKUP_STATUS_PAUSED": 3, - "BACKUP_STATUS_SUCCESS": 4, - "BACKUP_STATUS_ERROR": 5, - "BACKUP_STATUS_DELETING": 6, - "BACKUP_STATUS_FAILED_TO_DELETE": 7, + "BACKUP_STATUS_INVALID": 0, + "BACKUP_STATUS_PENDING": 1, + "BACKUP_STATUS_IN_PROGRESS": 2, + "BACKUP_STATUS_PAUSED": 3, + "BACKUP_STATUS_SUCCESS": 4, + "BACKUP_STATUS_ERROR": 5, + "BACKUP_STATUS_DELETING": 6, + "BACKUP_STATUS_FAILED_TO_DELETE": 7, + "BACKUP_STATUS_CLEANUP_IN_PROGRESS": 8, } ) @@ -116,6 +119,12 @@ type Artifact struct { CreatedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // Backup mode. Mode BackupMode `protobuf:"varint,11,opt,name=mode,proto3,enum=backup.v1.BackupMode" json:"mode,omitempty"` + // Source database setup type. + IsShardedCluster bool `protobuf:"varint,12,opt,name=is_sharded_cluster,json=isShardedCluster,proto3" json:"is_sharded_cluster,omitempty"` + // Folder to store artifact on a storage. + Folder string `protobuf:"bytes,13,opt,name=folder,proto3" json:"folder,omitempty"` + // List of artifact metadata. 
+ MetadataList []*Metadata `protobuf:"bytes,14,rep,name=metadata_list,json=metadataList,proto3" json:"metadata_list,omitempty"` } func (x *Artifact) Reset() { @@ -227,6 +236,27 @@ func (x *Artifact) GetMode() BackupMode { return BackupMode_BACKUP_MODE_INVALID } +func (x *Artifact) GetIsShardedCluster() bool { + if x != nil { + return x.IsShardedCluster + } + return false +} + +func (x *Artifact) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + +func (x *Artifact) GetMetadataList() []*Metadata { + if x != nil { + return x.MetadataList + } + return nil +} + type ListArtifactsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -571,7 +601,7 @@ var file_managementpb_backup_artifacts_proto_rawDesc = []byte{ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xab, 0x03, 0x0a, 0x08, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x1f, + 0x6f, 0x22, 0xab, 0x04, 0x0a, 0x08, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, @@ -597,7 +627,15 @@ var file_managementpb_backup_artifacts_proto_rawDesc = []byte{ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 
0x65, 0x22, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, + 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4a, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, @@ -630,7 +668,7 @@ var file_managementpb_backup_artifacts_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x74, 0x69, - 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x2a, 0xf1, 0x01, 0x0a, 0x0c, 0x42, 0x61, 0x63, + 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x2a, 0x98, 0x02, 0x0a, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x15, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x42, 
0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x53, @@ -645,46 +683,48 @@ var file_managementpb_backup_artifacts_proto_rawDesc = []byte{ 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x22, 0x0a, 0x1e, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x5f, 0x54, 0x4f, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x07, 0x32, 0xbf, 0x03, 0x0a, - 0x09, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x83, 0x01, 0x0a, 0x0d, 0x4c, - 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, - 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, - 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x88, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, - 0x61, 0x63, 0x74, 0x12, 0x20, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, - 0x3a, 0x01, 
0x2a, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x66, - 0x61, 0x63, 0x74, 0x73, 0x2f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0xa0, 0x01, 0x0a, 0x12, - 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x73, 0x12, 0x24, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x50, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, - 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, + 0x5f, 0x54, 0x4f, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x07, 0x12, 0x25, 0x0a, 0x21, + 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, + 0x45, 0x41, 0x4e, 0x55, 0x50, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, + 0x53, 0x10, 0x08, 0x32, 0xbf, 0x03, 0x0a, 0x09, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x73, 0x12, 0x83, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x01, + 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x88, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x20, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, - 0x50, 0x49, 0x54, 0x52, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x42, 0x9d, - 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, - 0x42, 0x0e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, - 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x3b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x58, - 0x58, 0xaa, 0x02, 0x09, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x09, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x15, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 
0x74, - 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x2f, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0xa0, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x74, 0x72, 0x54, + 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x24, 0x2e, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x74, 0x72, 0x54, 0x69, + 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x50, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, + 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x49, 0x54, 0x52, 0x54, 0x69, 0x6d, 0x65, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x73, 0x42, 0x9d, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, + 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x70, 0x62, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x76, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x58, 0x58, 0xaa, 0x02, 0x09, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x09, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 
0x31, + 0xe2, 0x02, 0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -715,6 +755,7 @@ var ( (DataModel)(0), // 9: backup.v1.DataModel (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp (BackupMode)(0), // 11: backup.v1.BackupMode + (*Metadata)(nil), // 12: backup.v1.Metadata } ) @@ -723,21 +764,22 @@ var file_managementpb_backup_artifacts_proto_depIdxs = []int32{ 0, // 1: backup.v1.Artifact.status:type_name -> backup.v1.BackupStatus 10, // 2: backup.v1.Artifact.created_at:type_name -> google.protobuf.Timestamp 11, // 3: backup.v1.Artifact.mode:type_name -> backup.v1.BackupMode - 1, // 4: backup.v1.ListArtifactsResponse.artifacts:type_name -> backup.v1.Artifact - 10, // 5: backup.v1.PitrTimerange.start_timestamp:type_name -> google.protobuf.Timestamp - 10, // 6: backup.v1.PitrTimerange.end_timestamp:type_name -> google.protobuf.Timestamp - 6, // 7: backup.v1.ListPitrTimerangesResponse.timeranges:type_name -> backup.v1.PitrTimerange - 2, // 8: backup.v1.Artifacts.ListArtifacts:input_type -> backup.v1.ListArtifactsRequest - 4, // 9: backup.v1.Artifacts.DeleteArtifact:input_type -> backup.v1.DeleteArtifactRequest - 7, // 10: backup.v1.Artifacts.ListPitrTimeranges:input_type -> backup.v1.ListPitrTimerangesRequest - 3, // 11: backup.v1.Artifacts.ListArtifacts:output_type -> backup.v1.ListArtifactsResponse - 5, // 12: backup.v1.Artifacts.DeleteArtifact:output_type -> backup.v1.DeleteArtifactResponse - 8, // 13: backup.v1.Artifacts.ListPitrTimeranges:output_type -> backup.v1.ListPitrTimerangesResponse - 11, // [11:14] is the sub-list for method output_type - 8, // [8:11] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the 
sub-list for field type_name + 12, // 4: backup.v1.Artifact.metadata_list:type_name -> backup.v1.Metadata + 1, // 5: backup.v1.ListArtifactsResponse.artifacts:type_name -> backup.v1.Artifact + 10, // 6: backup.v1.PitrTimerange.start_timestamp:type_name -> google.protobuf.Timestamp + 10, // 7: backup.v1.PitrTimerange.end_timestamp:type_name -> google.protobuf.Timestamp + 6, // 8: backup.v1.ListPitrTimerangesResponse.timeranges:type_name -> backup.v1.PitrTimerange + 2, // 9: backup.v1.Artifacts.ListArtifacts:input_type -> backup.v1.ListArtifactsRequest + 4, // 10: backup.v1.Artifacts.DeleteArtifact:input_type -> backup.v1.DeleteArtifactRequest + 7, // 11: backup.v1.Artifacts.ListPitrTimeranges:input_type -> backup.v1.ListPitrTimerangesRequest + 3, // 12: backup.v1.Artifacts.ListArtifacts:output_type -> backup.v1.ListArtifactsResponse + 5, // 13: backup.v1.Artifacts.DeleteArtifact:output_type -> backup.v1.DeleteArtifactResponse + 8, // 14: backup.v1.Artifacts.ListPitrTimeranges:output_type -> backup.v1.ListPitrTimerangesResponse + 12, // [12:15] is the sub-list for method output_type + 9, // [9:12] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_managementpb_backup_artifacts_proto_init() } diff --git a/api/managementpb/backup/artifacts.pb.validate.go b/api/managementpb/backup/artifacts.pb.validate.go index 3103d5910e..2f60a60469 100644 --- a/api/managementpb/backup/artifacts.pb.validate.go +++ b/api/managementpb/backup/artifacts.pb.validate.go @@ -106,6 +106,44 @@ func (m *Artifact) validate(all bool) error { // no validation rules for Mode + // no validation rules for IsShardedCluster + + // no validation rules for Folder + + for idx, item := range m.GetMetadataList() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, ArtifactValidationError{ + field: fmt.Sprintf("MetadataList[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ArtifactValidationError{ + field: fmt.Sprintf("MetadataList[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ArtifactValidationError{ + field: fmt.Sprintf("MetadataList[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if len(errors) > 0 { return ArtifactMultiError(errors) } diff --git a/api/managementpb/backup/artifacts.proto b/api/managementpb/backup/artifacts.proto index e2e1315fc5..69312bfe64 100644 --- a/api/managementpb/backup/artifacts.proto +++ b/api/managementpb/backup/artifacts.proto @@ -18,6 +18,7 @@ enum BackupStatus { BACKUP_STATUS_ERROR = 5; BACKUP_STATUS_DELETING = 6; BACKUP_STATUS_FAILED_TO_DELETE = 7; + BACKUP_STATUS_CLEANUP_IN_PROGRESS = 8; } // Artifact represents single backup artifact. @@ -44,6 +45,12 @@ message Artifact { google.protobuf.Timestamp created_at = 10; // Backup mode. BackupMode mode = 11; + // Source database setup type. + bool is_sharded_cluster = 12; + // Folder to store artifact on a storage. + string folder = 13; + // List of artifact metadata. 
+ repeated backup.v1.Metadata metadata_list = 14; } message ListArtifactsRequest {} diff --git a/api/managementpb/backup/backups.pb.go b/api/managementpb/backup/backups.pb.go index a43dc77447..aee644ed82 100644 --- a/api/managementpb/backup/backups.pb.go +++ b/api/managementpb/backup/backups.pb.go @@ -48,6 +48,8 @@ type StartBackupRequest struct { Retries uint32 `protobuf:"varint,6,opt,name=retries,proto3" json:"retries,omitempty"` // DataModel represents the data model used for the backup. DataModel DataModel `protobuf:"varint,7,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"` + // Folder on storage for artifact. + Folder string `protobuf:"bytes,8,opt,name=folder,proto3" json:"folder,omitempty"` } func (x *StartBackupRequest) Reset() { @@ -131,6 +133,13 @@ func (x *StartBackupRequest) GetDataModel() DataModel { return DataModel_DATA_MODEL_INVALID } +func (x *StartBackupRequest) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + type StartBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -438,6 +447,8 @@ type ScheduledBackup struct { Retention uint32 `protobuf:"varint,18,opt,name=retention,proto3" json:"retention,omitempty"` // Backup mode. Mode BackupMode `protobuf:"varint,19,opt,name=mode,proto3,enum=backup.v1.BackupMode" json:"mode,omitempty"` + // Folder on storage for artifact. 
+ Folder string `protobuf:"bytes,20,opt,name=folder,proto3" json:"folder,omitempty"` } func (x *ScheduledBackup) Reset() { @@ -598,6 +609,13 @@ func (x *ScheduledBackup) GetMode() BackupMode { return BackupMode_BACKUP_MODE_INVALID } +func (x *ScheduledBackup) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + type ScheduleBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -627,6 +645,8 @@ type ScheduleBackupRequest struct { Mode BackupMode `protobuf:"varint,11,opt,name=mode,proto3,enum=backup.v1.BackupMode" json:"mode,omitempty"` // Backup data model (physical or logical). DataModel DataModel `protobuf:"varint,12,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"` + // Folder on storage for artifact. + Folder string `protobuf:"bytes,13,opt,name=folder,proto3" json:"folder,omitempty"` } func (x *ScheduleBackupRequest) Reset() { @@ -745,6 +765,13 @@ func (x *ScheduleBackupRequest) GetDataModel() DataModel { return DataModel_DATA_MODEL_INVALID } +func (x *ScheduleBackupRequest) GetFolder() string { + if x != nil { + return x.Folder + } + return "" +} + type ScheduleBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1322,7 +1349,7 @@ var file_managementpb_backup_backups_proto_rawDesc = []byte{ 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x02, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x02, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, @@ -1341,85 +1368,88 @@ var file_managementpb_backup_backups_proto_rawDesc = []byte{ 0x33, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x4d, - 0x6f, 0x64, 0x65, 0x6c, 0x22, 0x36, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, - 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x25, - 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, - 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, - 0x8c, 0x01, 0x0a, 0x26, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x79, - 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x69, 0x6e, 0x76, 0x65, - 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x52, 0x05, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x12, 0x33, 0x0a, 0x07, 0x6d, 0x6f, 0x6e, - 0x67, 0x6f, 0x64, 0x62, 
0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x69, 0x6e, 0x76, - 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x22, 0xab, - 0x01, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x28, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x61, - 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x0e, 0x70, 0x69, 0x74, - 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x70, - 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x36, 0x0a, 0x15, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x49, 0x64, 0x22, 0xdd, 0x05, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x63, 0x68, 
0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x72, 0x6f, 0x6e, 0x45, - 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0e, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, - 
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x72, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x33, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, - 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, - 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x12, 0x35, 0x0a, - 0x08, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x6c, 0x61, 0x73, - 0x74, 0x52, 0x75, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x75, 0x6e, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x75, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x15, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, - 0x6d, 0x6f, 0x64, 0x65, 0x22, 0x80, 0x04, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x22, 0x36, 0x0a, 0x13, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x25, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, + 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x61, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x26, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, + 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x4d, 0x79, + 0x53, 0x51, 0x4c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x05, 0x6d, 0x79, 0x73, 0x71, + 0x6c, 0x12, 0x33, 0x0a, 0x07, 0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 
0x72, 0x79, 0x2e, 0x4d, + 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x6d, + 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x22, 0xab, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x26, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, + 0x64, 0x12, 0x41, 0x0a, 0x0e, 0x70, 0x69, 0x74, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x70, 0x69, 0x74, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0x36, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x22, 0xf5, 0x05, 0x0a, + 0x0f, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, + 0x12, 0x1d, 
0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x72, 0x6f, 0x6e, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x63, 0x72, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, + 0x65, 0x6c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x16, 0x0a, + 0x06, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x12, 0x35, 0x0a, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x75, + 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x75, 0x6e, 0x12, 0x35, 0x0a, 0x08, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, + 0x52, 0x75, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 
0x63, 0x6b, + 0x75, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x22, 0x98, 0x04, 0x0a, 0x15, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x73, 0x65, 0x72, @@ -1451,223 +1481,225 @@ var file_managementpb_backup_backups_proto_rawDesc = []byte{ 0x64, 0x65, 0x12, 0x33, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x52, 0x09, 0x64, 0x61, - 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x22, 0x48, 0x0a, 0x16, 0x53, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, - 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x67, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x47, 0x0a, 0x11, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0xb7, 0x04, 0x0a, 0x1c, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x13, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x72, 0x6f, - 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0e, 0x63, 0x72, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x30, 0x0a, 
0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x22, + 0x48, 0x0a, 0x16, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x67, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x11, 0x73, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x22, 0xb7, 0x04, 0x0a, 0x1c, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x64, 0x75, 0x6c, 0x65, 
0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x37, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x72, 0x6f, 0x6e, 0x45, 0x78, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x3a, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, - 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x36, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 
0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x1f, 0x0a, 0x1d, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x1c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x22, 0x1f, 0x0a, - 0x1d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, - 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x22, 0x4c, - 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x27, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x43, - 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x39, 0x0a, 0x08, - 0x4c, 0x6f, 0x67, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, 0xb0, 0x10, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x12, 0xe9, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x12, 0x1d, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x9a, 0x03, 0x92, 0x41, 0xe8, 0x02, 0x1a, 0xe5, 0x02, 0x43, 0x6f, 0x75, 0x6c, - 0x64, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x20, 0x63, 0x6f, 
0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6e, 0x67, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x20, 0x72, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x3a, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x58, 0x54, - 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x53, - 0x54, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x20, 0x2d, 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, - 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, - 0x50, 0x20, 0x2d, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x78, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x43, 0x4f, - 0x4d, 0x50, 0x41, 0x54, 0x49, 0x42, 0x4c, 0x45, 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, - 0x4b, 0x55, 0x50, 0x20, 0x2d, 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, - 0x6c, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x74, 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x3a, 0x01, 0x2a, 0x22, 0x23, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, - 0xce, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x30, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, - 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, - 0x01, 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1f, 0x0a, 0x1d, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x1c, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x13, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 
0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x49, 0x64, 0x22, 0x1f, 0x0a, 0x1d, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x6c, 0x6f, 0x67, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x04, 0x6c, 0x6f, + 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x22, 0x39, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x43, 0x68, 0x75, 0x6e, 0x6b, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, + 0xb0, 0x10, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0xe9, 0x03, 
0x0a, 0x0b, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1d, 0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x03, 0x92, 0x41, 0xe8, + 0x02, 0x1a, 0xe5, 0x02, 0x43, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x20, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x20, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x3a, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x20, 0x2d, + 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x69, 0x73, 0x20, 0x6e, + 0x6f, 0x74, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x58, + 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x20, 0x2d, 0x20, 0x64, 0x69, 0x66, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6f, + 0x66, 0x20, 0x78, 0x74, 
0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x78, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, + 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x54, 0x49, 0x42, 0x4c, 0x45, + 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x20, 0x2d, 0x20, 0x78, 0x74, + 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, + 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x61, 0x6b, 0x69, 0x6e, 0x67, + 0x20, 0x61, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x3a, + 0x01, 0x2a, 0x22, 0x23, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x12, 0xf8, 0x04, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x12, 0x1f, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa3, 0x04, 0x92, 0x41, 0xef, 0x03, 0x1a, 0xec, 0x03, 0x43, - 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x69, 0x6e, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 
0x73, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x20, 0x72, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x3a, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, - 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4c, 0x4c, 0x45, 0x44, 0x20, 0x2d, 0x20, 0x78, 0x74, 0x72, 0x61, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, - 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, - 0x43, 0x4b, 0x55, 0x50, 0x20, 0x2d, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, - 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x78, 0x74, 0x72, - 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x78, 0x62, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, - 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x54, 0x49, 0x42, 0x4c, 0x45, 0x5f, 0x58, 0x54, 0x52, 0x41, - 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x20, 0x2d, 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, - 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x4d, 0x79, 0x53, 0x51, 0x4c, - 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x0a, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, - 0x49, 
0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x54, 0x49, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x20, 0x2d, 0x20, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x20, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, - 0x6c, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, - 0x66, 0x61, 0x63, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, - 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x2a, 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x2f, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0e, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x20, - 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, 0x26, - 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x9f, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 
0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, - 0x26, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x2f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0xce, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, + 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x30, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, 0x01, 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0xf8, 0x04, 0x0a, 0x0d, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1f, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 
0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa3, 0x04, + 0x92, 0x41, 0xef, 0x03, 0x1a, 0xec, 0x03, 0x43, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, + 0x64, 0x65, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x20, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x3a, 0x0a, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, + 0x4b, 0x55, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4c, 0x4c, 0x45, + 0x44, 0x20, 0x2d, 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x69, + 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, + 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x20, 0x2d, 0x20, 0x64, + 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x20, 0x6f, 0x66, 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x78, 0x62, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x0a, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x54, 0x49, + 0x42, 0x4c, 0x45, 
0x5f, 0x58, 0x54, 0x52, 0x41, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x20, 0x2d, + 0x20, 0x78, 0x74, 0x72, 0x61, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x20, 0x69, 0x73, 0x20, 0x6e, + 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x77, 0x69, + 0x74, 0x68, 0x20, 0x4d, 0x79, 0x53, 0x51, 0x4c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x61, 0x6b, + 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x0a, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x54, + 0x49, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x4d, 0x59, 0x53, 0x51, + 0x4c, 0x20, 0x2d, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x4d, 0x79, 0x53, 0x51, 0x4c, + 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, + 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x0e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x20, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 
0x64, 0x75, 0x6c, 0x65, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x9f, + 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x26, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x01, 0x2a, 0x22, 0x2b, 0x2f, 0x76, 0x31, - 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x12, 0xa4, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x12, 0x27, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x3a, 0x01, 0x2a, - 0x22, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 
- 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x12, - 0xa4, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x27, 0x2e, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x32, 0x3a, 0x01, 0x2a, 0x22, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x12, 0x72, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, - 0x73, 0x12, 0x19, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, - 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 
0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, + 0x3a, 0x01, 0x2a, 0x22, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x0d, 0x63, - 0x6f, 0x6d, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, - 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3b, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x58, 0x58, 0xaa, 0x02, 0x09, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x09, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, - 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x73, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, + 0x12, 0xa4, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x27, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 
0x76, 0x31, 0x2e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x3a, 0x01, 0x2a, 0x22, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x12, 0xa4, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x12, 0x27, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x3a, 0x01, 0x2a, 0x22, + 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x12, 0x72, + 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x19, 0x2e, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x30, 0x82, 
0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, + 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x4c, 0x6f, + 0x67, 0x73, 0x42, 0x9b, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x3b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x76, 0x31, 0xa2, 0x02, 0x03, + 0x42, 0x58, 0x58, 0xaa, 0x02, 0x09, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x56, 0x31, 0xca, + 0x02, 0x09, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x15, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/api/managementpb/backup/backups.pb.validate.go b/api/managementpb/backup/backups.pb.validate.go index 0b4d5e5ffc..ba5d4e9d5d 100644 --- a/api/managementpb/backup/backups.pb.validate.go +++ b/api/managementpb/backup/backups.pb.validate.go @@ -116,6 +116,8 @@ func (m *StartBackupRequest) validate(all bool) error { // no validation rules for DataModel + // no validation rules for Folder + if len(errors) > 0 { return StartBackupRequestMultiError(errors) } @@ -1013,6 +1015,8 @@ func (m *ScheduledBackup) validate(all bool) error { // no validation rules for Mode + // no validation rules for Folder + if len(errors) > 0 { return ScheduledBackupMultiError(errors) } @@ 
-1218,6 +1222,8 @@ func (m *ScheduleBackupRequest) validate(all bool) error { // no validation rules for DataModel + // no validation rules for Folder + if len(errors) > 0 { return ScheduleBackupRequestMultiError(errors) } diff --git a/api/managementpb/backup/backups.proto b/api/managementpb/backup/backups.proto index f12b083c67..347f342024 100644 --- a/api/managementpb/backup/backups.proto +++ b/api/managementpb/backup/backups.proto @@ -28,6 +28,8 @@ message StartBackupRequest { uint32 retries = 6; // DataModel represents the data model used for the backup. DataModel data_model = 7; + // Folder on storage for artifact. + string folder = 8; } message StartBackupResponse { @@ -97,6 +99,8 @@ message ScheduledBackup { uint32 retention = 18; // Backup mode. BackupMode mode = 19; + // Folder on storage for artifact. + string folder = 20; } message ScheduleBackupRequest { @@ -122,9 +126,10 @@ message ScheduleBackupRequest { uint32 retention = 10; // Backup mode. BackupMode mode = 11; - // Backup data model (physical or logical). DataModel data_model = 12; + // Folder on storage for artifact. + string folder = 13; } message ScheduleBackupResponse { diff --git a/api/managementpb/backup/common.pb.go b/api/managementpb/backup/common.pb.go index 38e74046af..8af65718a2 100644 --- a/api/managementpb/backup/common.pb.go +++ b/api/managementpb/backup/common.pb.go @@ -10,8 +10,10 @@ import ( reflect "reflect" sync "sync" + _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -124,12 +126,228 @@ func (BackupMode) EnumDescriptor() ([]byte, []int) { return file_managementpb_backup_common_proto_rawDescGZIP(), []int{1} } +// File represents file or folder on a storage. 
+type File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"` +} + +func (x *File) Reset() { + *x = File{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_backup_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*File) ProtoMessage() {} + +func (x *File) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_backup_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use File.ProtoReflect.Descriptor instead. +func (*File) Descriptor() ([]byte, []int) { + return file_managementpb_backup_common_proto_rawDescGZIP(), []int{0} +} + +func (x *File) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *File) GetIsDirectory() bool { + if x != nil { + return x.IsDirectory + } + return false +} + +// PbmMetadata contains additional data for pbm cli tools. +type PbmMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of backup in backup tool representation. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *PbmMetadata) Reset() { + *x = PbmMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_backup_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PbmMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PbmMetadata) ProtoMessage() {} + +func (x *PbmMetadata) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_backup_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PbmMetadata.ProtoReflect.Descriptor instead. +func (*PbmMetadata) Descriptor() ([]byte, []int) { + return file_managementpb_backup_common_proto_rawDescGZIP(), []int{1} +} + +func (x *PbmMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Metadata contains extra artifact data like files it consists of, tool specific data, etc. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of files backup consists of. + FileList []*File `protobuf:"bytes,1,rep,name=file_list,json=fileList,proto3" json:"file_list,omitempty"` + // Exact time DB can be restored to. + RestoreTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=restore_to,json=restoreTo,proto3" json:"restore_to,omitempty"` + // Extra data for backup tools. 
+ // + // Types that are assignable to BackupToolMetadata: + // + // *Metadata_PbmMetadata + BackupToolMetadata isMetadata_BackupToolMetadata `protobuf_oneof:"backup_tool_metadata"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_backup_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_backup_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_managementpb_backup_common_proto_rawDescGZIP(), []int{2} +} + +func (x *Metadata) GetFileList() []*File { + if x != nil { + return x.FileList + } + return nil +} + +func (x *Metadata) GetRestoreTo() *timestamppb.Timestamp { + if x != nil { + return x.RestoreTo + } + return nil +} + +func (m *Metadata) GetBackupToolMetadata() isMetadata_BackupToolMetadata { + if m != nil { + return m.BackupToolMetadata + } + return nil +} + +func (x *Metadata) GetPbmMetadata() *PbmMetadata { + if x, ok := x.GetBackupToolMetadata().(*Metadata_PbmMetadata); ok { + return x.PbmMetadata + } + return nil +} + +type isMetadata_BackupToolMetadata interface { + isMetadata_BackupToolMetadata() +} + +type Metadata_PbmMetadata struct { + PbmMetadata *PbmMetadata `protobuf:"bytes,3,opt,name=pbm_metadata,json=pbmMetadata,proto3,oneof"` +} + +func (*Metadata_PbmMetadata) isMetadata_BackupToolMetadata() {} + var File_managementpb_backup_common_proto protoreflect.FileDescriptor var file_managementpb_backup_common_proto_rawDesc 
= []byte{ 0x0a, 0x20, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2a, 0x3e, 0x0a, + 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, + 0x21, 0x0a, 0x0b, 0x50, 0x62, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x2c, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x39, 0x0a, + 0x0a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x72, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x12, 0x3b, 0x0a, 0x0c, 0x70, 0x62, 0x6d, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x62, 0x6d, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x62, 0x6d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x16, 0x0a, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, + 0x74, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2a, 0x3e, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x4c, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x48, 0x59, 0x53, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, @@ -166,18 +384,26 @@ func file_managementpb_backup_common_proto_rawDescGZIP() []byte { var ( file_managementpb_backup_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) + file_managementpb_backup_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3) file_managementpb_backup_common_proto_goTypes = []interface{}{ - (DataModel)(0), // 0: backup.v1.DataModel - (BackupMode)(0), // 1: backup.v1.BackupMode + (DataModel)(0), // 0: backup.v1.DataModel + (BackupMode)(0), // 1: backup.v1.BackupMode + (*File)(nil), // 2: backup.v1.File + (*PbmMetadata)(nil), // 3: backup.v1.PbmMetadata + (*Metadata)(nil), // 4: backup.v1.Metadata + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp } ) var file_managementpb_backup_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 2, // 0: 
backup.v1.Metadata.file_list:type_name -> backup.v1.File + 5, // 1: backup.v1.Metadata.restore_to:type_name -> google.protobuf.Timestamp + 3, // 2: backup.v1.Metadata.pbm_metadata:type_name -> backup.v1.PbmMetadata + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_managementpb_backup_common_proto_init() } @@ -185,19 +411,61 @@ func file_managementpb_backup_common_proto_init() { if File_managementpb_backup_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_managementpb_backup_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_backup_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PbmMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_backup_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_managementpb_backup_common_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Metadata_PbmMetadata)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_managementpb_backup_common_proto_rawDesc, NumEnums: 2, - NumMessages: 0, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, GoTypes: file_managementpb_backup_common_proto_goTypes, DependencyIndexes: 
file_managementpb_backup_common_proto_depIdxs, EnumInfos: file_managementpb_backup_common_proto_enumTypes, + MessageInfos: file_managementpb_backup_common_proto_msgTypes, }.Build() File_managementpb_backup_common_proto = out.File file_managementpb_backup_common_proto_rawDesc = nil diff --git a/api/managementpb/backup/common.pb.validate.go b/api/managementpb/backup/common.pb.validate.go index d077796b8d..6175160be1 100644 --- a/api/managementpb/backup/common.pb.validate.go +++ b/api/managementpb/backup/common.pb.validate.go @@ -34,3 +34,423 @@ var ( _ = anypb.Any{} _ = sort.Sort ) + +// Validate checks the field values on File with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *File) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on File with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in FileMultiError, or nil if none found. +func (m *File) ValidateAll() error { + return m.validate(true) +} + +func (m *File) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if utf8.RuneCountInString(m.GetName()) < 1 { + err := FileValidationError{ + field: "Name", + reason: "value length must be at least 1 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + + // no validation rules for IsDirectory + + if len(errors) > 0 { + return FileMultiError(errors) + } + + return nil +} + +// FileMultiError is an error wrapping multiple validation errors returned by +// File.ValidateAll() if the designated constraints aren't met. +type FileMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FileMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FileMultiError) AllErrors() []error { return m } + +// FileValidationError is the validation error returned by File.Validate if the +// designated constraints aren't met. +type FileValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FileValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FileValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FileValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FileValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FileValidationError) ErrorName() string { return "FileValidationError" } + +// Error satisfies the builtin error interface +func (e FileValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFile.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FileValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FileValidationError{} + +// Validate checks the field values on PbmMetadata with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PbmMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PbmMetadata with the rules defined in +// the proto definition for this message. 
If any rules are violated, the +// result is a list of violation errors wrapped in PbmMetadataMultiError, or +// nil if none found. +func (m *PbmMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *PbmMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if len(errors) > 0 { + return PbmMetadataMultiError(errors) + } + + return nil +} + +// PbmMetadataMultiError is an error wrapping multiple validation errors +// returned by PbmMetadata.ValidateAll() if the designated constraints aren't met. +type PbmMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PbmMetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PbmMetadataMultiError) AllErrors() []error { return m } + +// PbmMetadataValidationError is the validation error returned by +// PbmMetadata.Validate if the designated constraints aren't met. +type PbmMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PbmMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PbmMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PbmMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PbmMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PbmMetadataValidationError) ErrorName() string { return "PbmMetadataValidationError" } + +// Error satisfies the builtin error interface +func (e PbmMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPbmMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PbmMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PbmMetadataValidationError{} + +// Validate checks the field values on Metadata with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Metadata with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataMultiError, or nil +// if none found. 
+func (m *Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetFileList() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("FileList[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: fmt.Sprintf("FileList[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: fmt.Sprintf("FileList[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetRestoreTo()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "RestoreTo", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "RestoreTo", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRestoreTo()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: "RestoreTo", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.BackupToolMetadata.(type) { + case *Metadata_PbmMetadata: + if v == nil { + err := MetadataValidationError{ + field: "BackupToolMetadata", + 
reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetPbmMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "PbmMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "PbmMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPbmMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: "PbmMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return MetadataMultiError(errors) + } + + return nil +} + +// MetadataMultiError is an error wrapping multiple validation errors returned +// by Metadata.ValidateAll() if the designated constraints aren't met. +type MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m MetadataMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMultiError) AllErrors() []error { return m } + +// MetadataValidationError is the validation error returned by +// Metadata.Validate if the designated constraints aren't met. +type MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataValidationError{} diff --git a/api/managementpb/backup/common.proto b/api/managementpb/backup/common.proto index 4816a7c502..59b56dfa3a 100644 --- a/api/managementpb/backup/common.proto +++ b/api/managementpb/backup/common.proto @@ -2,6 +2,9 @@ syntax = "proto3"; package backup.v1; +import "google/protobuf/timestamp.proto"; +import "validate/validate.proto"; + option go_package = "api/managementpb/backup;backupv1"; // DataModel is a model used for performing a backup. @@ -18,3 +21,27 @@ enum BackupMode { INCREMENTAL = 2; PITR = 3; } + +// File represents file or folder on a storage. +message File { + string name = 1 [(validate.rules).string.min_len = 1]; + bool is_directory = 2; +} + +// PbmMetadata contains additional data for pbm cli tools. +message PbmMetadata { + // Name of backup in backup tool representation. + string name = 1; +} + +// Metadata contains extra artifact data like files it consists of, tool specific data, etc. +message Metadata { + // List of files backup consists of. + repeated File file_list = 1; + // Exact time DB can be restored to. 
+ google.protobuf.Timestamp restore_to = 2; + // Extra data for backup tools. + oneof backup_tool_metadata { + PbmMetadata pbm_metadata = 3; + } +} diff --git a/api/managementpb/backup/json/backup.json b/api/managementpb/backup/json/backup.json index 0089de4897..dd5fd1afe0 100644 --- a/api/managementpb/backup/json/backup.json +++ b/api/managementpb/backup/json/backup.json @@ -136,6 +136,16 @@ ], "x-order": 7 }, + "folder": { + "description": "Folder to store artifact on a storage.", + "type": "string", + "x-order": 12 + }, + "is_sharded_cluster": { + "description": "Source database setup type.", + "type": "boolean", + "x-order": 11 + }, "location_id": { "description": "Machine-readable location ID.", "type": "string", @@ -146,6 +156,54 @@ "type": "string", "x-order": 4 }, + "metadata_list": { + "description": "List of artifact metadata.", + "type": "array", + "items": { + "description": "Metadata contains extra artifact data like files it consists of, tool specific data, etc.", + "type": "object", + "properties": { + "file_list": { + "description": "List of files backup consists of.", + "type": "array", + "items": { + "description": "File represents file or folder on a storage.", + "type": "object", + "properties": { + "is_directory": { + "type": "boolean", + "x-order": 1 + }, + "name": { + "type": "string", + "x-order": 0 + } + } + }, + "x-order": 0 + }, + "pbm_metadata": { + "description": "PbmMetadata contains additional data for pbm cli tools.", + "type": "object", + "properties": { + "name": { + "description": "Name of backup in backup tool representation.", + "type": "string", + "x-order": 0 + } + }, + "x-order": 2 + }, + "restore_to": { + "description": "Exact time DB can be restored to.", + "type": "string", + "format": "date-time", + "x-order": 1 + } + } + }, + "x-order": 13 + }, "mode": { "description": "BackupMode specifies backup mode.", "type": "string", @@ -185,7 +243,8 @@ "BACKUP_STATUS_SUCCESS", "BACKUP_STATUS_ERROR", "BACKUP_STATUS_DELETING", - 
"BACKUP_STATUS_FAILED_TO_DELETE" + "BACKUP_STATUS_FAILED_TO_DELETE", + "BACKUP_STATUS_CLEANUP_IN_PROGRESS" ], "x-order": 8 }, @@ -782,6 +841,11 @@ "type": "boolean", "x-order": 11 }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 18 + }, "last_run": { "description": "Last run.", "type": "string", @@ -1094,6 +1158,11 @@ "type": "boolean", "x-order": 8 }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 12 + }, "location_id": { "description": "Machine-readable location ID.", "type": "string", @@ -1227,6 +1296,11 @@ "type": "string", "x-order": 3 }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 7 + }, "location_id": { "description": "Machine-readable location ID.", "type": "string", diff --git a/api/managementpb/backup/json/client/artifacts/list_artifacts_responses.go b/api/managementpb/backup/json/client/artifacts/list_artifacts_responses.go index ddd8dd676b..c9d51fc1fc 100644 --- a/api/managementpb/backup/json/client/artifacts/list_artifacts_responses.go +++ b/api/managementpb/backup/json/client/artifacts/list_artifacts_responses.go @@ -390,7 +390,7 @@ type ListArtifactsOKBodyArtifactsItems0 struct { DataModel *string `json:"data_model,omitempty"` // BackupStatus shows the current status of execution of backup. - // Enum: [BACKUP_STATUS_INVALID BACKUP_STATUS_PENDING BACKUP_STATUS_IN_PROGRESS BACKUP_STATUS_PAUSED BACKUP_STATUS_SUCCESS BACKUP_STATUS_ERROR BACKUP_STATUS_DELETING BACKUP_STATUS_FAILED_TO_DELETE] + // Enum: [BACKUP_STATUS_INVALID BACKUP_STATUS_PENDING BACKUP_STATUS_IN_PROGRESS BACKUP_STATUS_PAUSED BACKUP_STATUS_SUCCESS BACKUP_STATUS_ERROR BACKUP_STATUS_DELETING BACKUP_STATUS_FAILED_TO_DELETE BACKUP_STATUS_CLEANUP_IN_PROGRESS] Status *string `json:"status,omitempty"` // Artifact creation time. @@ -400,6 +400,15 @@ type ListArtifactsOKBodyArtifactsItems0 struct { // BackupMode specifies backup mode. 
// Enum: [BACKUP_MODE_INVALID SNAPSHOT INCREMENTAL PITR] Mode *string `json:"mode,omitempty"` + + // Source database setup type. + IsShardedCluster bool `json:"is_sharded_cluster,omitempty"` + + // Folder to store artifact on a storage. + Folder string `json:"folder,omitempty"` + + // List of artifact metadata. + MetadataList []*ListArtifactsOKBodyArtifactsItems0MetadataListItems0 `json:"metadata_list"` } // Validate validates this list artifacts OK body artifacts items0 @@ -422,6 +431,10 @@ func (o *ListArtifactsOKBodyArtifactsItems0) Validate(formats strfmt.Registry) e res = append(res, err) } + if err := o.validateMetadataList(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -477,7 +490,7 @@ var listArtifactsOkBodyArtifactsItems0TypeStatusPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["BACKUP_STATUS_INVALID","BACKUP_STATUS_PENDING","BACKUP_STATUS_IN_PROGRESS","BACKUP_STATUS_PAUSED","BACKUP_STATUS_SUCCESS","BACKUP_STATUS_ERROR","BACKUP_STATUS_DELETING","BACKUP_STATUS_FAILED_TO_DELETE"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["BACKUP_STATUS_INVALID","BACKUP_STATUS_PENDING","BACKUP_STATUS_IN_PROGRESS","BACKUP_STATUS_PAUSED","BACKUP_STATUS_SUCCESS","BACKUP_STATUS_ERROR","BACKUP_STATUS_DELETING","BACKUP_STATUS_FAILED_TO_DELETE","BACKUP_STATUS_CLEANUP_IN_PROGRESS"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -510,6 +523,9 @@ const ( // ListArtifactsOKBodyArtifactsItems0StatusBACKUPSTATUSFAILEDTODELETE captures enum value "BACKUP_STATUS_FAILED_TO_DELETE" ListArtifactsOKBodyArtifactsItems0StatusBACKUPSTATUSFAILEDTODELETE string = "BACKUP_STATUS_FAILED_TO_DELETE" + + // ListArtifactsOKBodyArtifactsItems0StatusBACKUPSTATUSCLEANUPINPROGRESS captures enum value "BACKUP_STATUS_CLEANUP_IN_PROGRESS" + ListArtifactsOKBodyArtifactsItems0StatusBACKUPSTATUSCLEANUPINPROGRESS string = "BACKUP_STATUS_CLEANUP_IN_PROGRESS" ) // prop 
value enum @@ -593,8 +609,60 @@ func (o *ListArtifactsOKBodyArtifactsItems0) validateMode(formats strfmt.Registr return nil } -// ContextValidate validates this list artifacts OK body artifacts items0 based on context it is used +func (o *ListArtifactsOKBodyArtifactsItems0) validateMetadataList(formats strfmt.Registry) error { + if swag.IsZero(o.MetadataList) { // not required + return nil + } + + for i := 0; i < len(o.MetadataList); i++ { + if swag.IsZero(o.MetadataList[i]) { // not required + continue + } + + if o.MetadataList[i] != nil { + if err := o.MetadataList[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metadata_list" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metadata_list" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this list artifacts OK body artifacts items0 based on the context it is used func (o *ListArtifactsOKBodyArtifactsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateMetadataList(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListArtifactsOKBodyArtifactsItems0) contextValidateMetadataList(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.MetadataList); i++ { + if o.MetadataList[i] != nil { + if err := o.MetadataList[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metadata_list" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metadata_list" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + } + return nil } @@ -615,3 +683,243 @@ func (o *ListArtifactsOKBodyArtifactsItems0) UnmarshalBinary(b []byte) error { *o = res return nil } + +/* +ListArtifactsOKBodyArtifactsItems0MetadataListItems0 Metadata contains extra artifact data like files it consists of, tool specific data, etc. +swagger:model ListArtifactsOKBodyArtifactsItems0MetadataListItems0 +*/ +type ListArtifactsOKBodyArtifactsItems0MetadataListItems0 struct { + // List of files backup consists of. + FileList []*ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0 `json:"file_list"` + + // Exact time DB can be restored to. + // Format: date-time + RestoreTo strfmt.DateTime `json:"restore_to,omitempty"` + + // pbm metadata + PbmMetadata *ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata `json:"pbm_metadata,omitempty"` +} + +// Validate validates this list artifacts OK body artifacts items0 metadata list items0 +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateFileList(formats); err != nil { + res = append(res, err) + } + + if err := o.validateRestoreTo(formats); err != nil { + res = append(res, err) + } + + if err := o.validatePbmMetadata(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) validateFileList(formats strfmt.Registry) error { + if swag.IsZero(o.FileList) { // not required + return nil + } + + for i := 0; i < len(o.FileList); i++ { + if swag.IsZero(o.FileList[i]) { // not required + continue + } + + if o.FileList[i] != nil { + if err := o.FileList[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("file_list" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("file_list" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) validateRestoreTo(formats strfmt.Registry) error { + if swag.IsZero(o.RestoreTo) { // not required + return nil + } + + if err := validate.FormatOf("restore_to", "body", "date-time", o.RestoreTo.String(), formats); err != nil { + return err + } + + return nil +} + +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) validatePbmMetadata(formats strfmt.Registry) error { + if swag.IsZero(o.PbmMetadata) { // not required + return nil + } + + if o.PbmMetadata != nil { + if err := o.PbmMetadata.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pbm_metadata") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("pbm_metadata") + } + return err + } + } + + return nil +} + +// ContextValidate validate this list artifacts OK body artifacts items0 metadata list items0 based on the context it is used +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateFileList(ctx, formats); err != nil { + res = append(res, err) + } + + if err := o.contextValidatePbmMetadata(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) contextValidateFileList(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.FileList); i++ { + if o.FileList[i] != nil { + if err := o.FileList[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("file_list" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("file_list" + "." + strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) contextValidatePbmMetadata(ctx context.Context, formats strfmt.Registry) error { + if o.PbmMetadata != nil { + if err := o.PbmMetadata.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pbm_metadata") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("pbm_metadata") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0) UnmarshalBinary(b []byte) error { + var res ListArtifactsOKBodyArtifactsItems0MetadataListItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0 File represents file or folder on a storage. 
+swagger:model ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0 +*/ +type ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0 struct { + // name + Name string `json:"name,omitempty"` + + // is directory + IsDirectory bool `json:"is_directory,omitempty"` +} + +// Validate validates this list artifacts OK body artifacts items0 metadata list items0 file list items0 +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this list artifacts OK body artifacts items0 metadata list items0 file list items0 based on context it is used +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0) UnmarshalBinary(b []byte) error { + var res ListArtifactsOKBodyArtifactsItems0MetadataListItems0FileListItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata PbmMetadata contains additional data for pbm cli tools. +swagger:model ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata +*/ +type ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata struct { + // Name of backup in backup tool representation. 
+ Name string `json:"name,omitempty"` +} + +// Validate validates this list artifacts OK body artifacts items0 metadata list items0 pbm metadata +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this list artifacts OK body artifacts items0 metadata list items0 pbm metadata based on context it is used +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata) UnmarshalBinary(b []byte) error { + var res ListArtifactsOKBodyArtifactsItems0MetadataListItems0PbmMetadata + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/api/managementpb/backup/json/client/backups/list_scheduled_backups_responses.go b/api/managementpb/backup/json/client/backups/list_scheduled_backups_responses.go index caccf8b760..dc4448f868 100644 --- a/api/managementpb/backup/json/client/backups/list_scheduled_backups_responses.go +++ b/api/managementpb/backup/json/client/backups/list_scheduled_backups_responses.go @@ -422,6 +422,9 @@ type ListScheduledBackupsOKBodyScheduledBackupsItems0 struct { // BackupMode specifies backup mode. // Enum: [BACKUP_MODE_INVALID SNAPSHOT INCREMENTAL PITR] Mode *string `json:"mode,omitempty"` + + // Folder on storage for artifact. 
+ Folder string `json:"folder,omitempty"` } // Validate validates this list scheduled backups OK body scheduled backups items0 diff --git a/api/managementpb/backup/json/client/backups/schedule_backup_responses.go b/api/managementpb/backup/json/client/backups/schedule_backup_responses.go index 4753ffc4b3..dc5aff4818 100644 --- a/api/managementpb/backup/json/client/backups/schedule_backup_responses.go +++ b/api/managementpb/backup/json/client/backups/schedule_backup_responses.go @@ -163,6 +163,9 @@ type ScheduleBackupBody struct { // DataModel is a model used for performing a backup. // Enum: [DATA_MODEL_INVALID PHYSICAL LOGICAL] DataModel *string `json:"data_model,omitempty"` + + // Folder on storage for artifact. + Folder string `json:"folder,omitempty"` } // Validate validates this schedule backup body diff --git a/api/managementpb/backup/json/client/backups/start_backup_responses.go b/api/managementpb/backup/json/client/backups/start_backup_responses.go index 6c4786db9d..95949e6af9 100644 --- a/api/managementpb/backup/json/client/backups/start_backup_responses.go +++ b/api/managementpb/backup/json/client/backups/start_backup_responses.go @@ -146,6 +146,9 @@ type StartBackupBody struct { // DataModel is a model used for performing a backup. // Enum: [DATA_MODEL_INVALID PHYSICAL LOGICAL] DataModel *string `json:"data_model,omitempty"` + + // Folder on storage for artifact. 
+ Folder string `json:"folder,omitempty"` } // Validate validates this start backup body diff --git a/api/managementpb/ia/alerts.pb.validate.go b/api/managementpb/ia/alerts.pb.validate.go index a574608768..82f050f980 100644 --- a/api/managementpb/ia/alerts.pb.validate.go +++ b/api/managementpb/ia/alerts.pb.validate.go @@ -36,7 +36,7 @@ var ( _ = anypb.Any{} _ = sort.Sort - _ = managementpb.BooleanFlag(0) + _ = managementpb.Severity(0) ) // Validate checks the field values on Alert with the rules defined in the diff --git a/api/managementpb/ia/rules.pb.validate.go b/api/managementpb/ia/rules.pb.validate.go index 59dd9fb45f..03d92e46f4 100644 --- a/api/managementpb/ia/rules.pb.validate.go +++ b/api/managementpb/ia/rules.pb.validate.go @@ -39,7 +39,7 @@ var ( _ = alertingv1.ParamType(0) - _ = managementpb.BooleanFlag(0) + _ = managementpb.Severity(0) ) // Validate checks the field values on Filter with the rules defined in the diff --git a/api/managementpb/node/json/client/mgmt_node/get_node_parameters.go b/api/managementpb/node/json/client/mgmt_node/get_node_parameters.go new file mode 100644 index 0000000000..01d945a85b --- /dev/null +++ b/api/managementpb/node/json/client/mgmt_node/get_node_parameters.go @@ -0,0 +1,144 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package mgmt_node + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGetNodeParams creates a new GetNodeParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewGetNodeParams() *GetNodeParams { + return &GetNodeParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetNodeParamsWithTimeout creates a new GetNodeParams object +// with the ability to set a timeout on a request. +func NewGetNodeParamsWithTimeout(timeout time.Duration) *GetNodeParams { + return &GetNodeParams{ + timeout: timeout, + } +} + +// NewGetNodeParamsWithContext creates a new GetNodeParams object +// with the ability to set a context for a request. +func NewGetNodeParamsWithContext(ctx context.Context) *GetNodeParams { + return &GetNodeParams{ + Context: ctx, + } +} + +// NewGetNodeParamsWithHTTPClient creates a new GetNodeParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetNodeParamsWithHTTPClient(client *http.Client) *GetNodeParams { + return &GetNodeParams{ + HTTPClient: client, + } +} + +/* +GetNodeParams contains all the parameters to send to the API endpoint + + for the get node operation. + + Typically these are written to a http.Request. +*/ +type GetNodeParams struct { + // Body. + Body GetNodeBody + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get node params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetNodeParams) WithDefaults() *GetNodeParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get node params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetNodeParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get node params +func (o *GetNodeParams) WithTimeout(timeout time.Duration) *GetNodeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get node params +func (o *GetNodeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get node params +func (o *GetNodeParams) WithContext(ctx context.Context) *GetNodeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get node params +func (o *GetNodeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get node params +func (o *GetNodeParams) WithHTTPClient(client *http.Client) *GetNodeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get node params +func (o *GetNodeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the get node params +func (o *GetNodeParams) WithBody(body GetNodeBody) *GetNodeParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the get node params +func (o *GetNodeParams) SetBody(body GetNodeBody) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *GetNodeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/api/managementpb/node/json/client/mgmt_node/get_node_responses.go b/api/managementpb/node/json/client/mgmt_node/get_node_responses.go new file mode 100644 index 0000000000..8168ec3df5 --- /dev/null +++ b/api/managementpb/node/json/client/mgmt_node/get_node_responses.go @@ -0,0 +1,766 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package mgmt_node + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// GetNodeReader is a Reader for the GetNode structure. +type GetNodeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetNodeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetNodeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGetNodeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGetNodeOK creates a GetNodeOK with default headers values +func NewGetNodeOK() *GetNodeOK { + return &GetNodeOK{} +} + +/* +GetNodeOK describes a response with status code 200, with default header values. + +A successful response. 
+*/ +type GetNodeOK struct { + Payload *GetNodeOKBody +} + +func (o *GetNodeOK) Error() string { + return fmt.Sprintf("[POST /v1/management/Node/Get][%d] getNodeOk %+v", 200, o.Payload) +} + +func (o *GetNodeOK) GetPayload() *GetNodeOKBody { + return o.Payload +} + +func (o *GetNodeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(GetNodeOKBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetNodeDefault creates a GetNodeDefault with default headers values +func NewGetNodeDefault(code int) *GetNodeDefault { + return &GetNodeDefault{ + _statusCode: code, + } +} + +/* +GetNodeDefault describes a response with status code -1, with default header values. + +An unexpected error response. +*/ +type GetNodeDefault struct { + _statusCode int + + Payload *GetNodeDefaultBody +} + +// Code gets the status code for the get node default response +func (o *GetNodeDefault) Code() int { + return o._statusCode +} + +func (o *GetNodeDefault) Error() string { + return fmt.Sprintf("[POST /v1/management/Node/Get][%d] GetNode default %+v", o._statusCode, o.Payload) +} + +func (o *GetNodeDefault) GetPayload() *GetNodeDefaultBody { + return o.Payload +} + +func (o *GetNodeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(GetNodeDefaultBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +GetNodeBody get node body +swagger:model GetNodeBody +*/ +type GetNodeBody struct { + // Unique Node identifier. 
+ NodeID string `json:"node_id,omitempty"` +} + +// Validate validates this get node body +func (o *GetNodeBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get node body based on context it is used +func (o *GetNodeBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeBody) UnmarshalBinary(b []byte) error { + var res GetNodeBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +GetNodeDefaultBody get node default body +swagger:model GetNodeDefaultBody +*/ +type GetNodeDefaultBody struct { + // code + Code int32 `json:"code,omitempty"` + + // message + Message string `json:"message,omitempty"` + + // details + Details []*GetNodeDefaultBodyDetailsItems0 `json:"details"` +} + +// Validate validates this get node default body +func (o *GetNodeDefaultBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateDetails(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *GetNodeDefaultBody) validateDetails(formats strfmt.Registry) error { + if swag.IsZero(o.Details) { // not required + return nil + } + + for i := 0; i < len(o.Details); i++ { + if swag.IsZero(o.Details[i]) { // not required + continue + } + + if o.Details[i] != nil { + if err := o.Details[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("GetNode default" + "." + "details" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("GetNode default" + "." + "details" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this get node default body based on the context it is used +func (o *GetNodeDefaultBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateDetails(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *GetNodeDefaultBody) contextValidateDetails(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Details); i++ { + if o.Details[i] != nil { + if err := o.Details[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("GetNode default" + "." + "details" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("GetNode default" + "." + "details" + "." + strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeDefaultBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeDefaultBody) UnmarshalBinary(b []byte) error { + var res GetNodeDefaultBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +GetNodeDefaultBodyDetailsItems0 get node default body details items0 +swagger:model GetNodeDefaultBodyDetailsItems0 +*/ +type GetNodeDefaultBodyDetailsItems0 struct { + // at type + AtType string `json:"@type,omitempty"` +} + +// Validate validates this get node default body details items0 +func (o *GetNodeDefaultBodyDetailsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get node default body details items0 based on context it is used +func (o *GetNodeDefaultBodyDetailsItems0) 
ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeDefaultBodyDetailsItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeDefaultBodyDetailsItems0) UnmarshalBinary(b []byte) error { + var res GetNodeDefaultBodyDetailsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +GetNodeOKBody get node OK body +swagger:model GetNodeOKBody +*/ +type GetNodeOKBody struct { + // node + Node *GetNodeOKBodyNode `json:"node,omitempty"` +} + +// Validate validates this get node OK body +func (o *GetNodeOKBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateNode(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *GetNodeOKBody) validateNode(formats strfmt.Registry) error { + if swag.IsZero(o.Node) { // not required + return nil + } + + if o.Node != nil { + if err := o.Node.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getNodeOk" + "." + "node") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getNodeOk" + "." + "node") + } + return err + } + } + + return nil +} + +// ContextValidate validate this get node OK body based on the context it is used +func (o *GetNodeOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateNode(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetNodeOKBody) contextValidateNode(ctx context.Context, formats strfmt.Registry) error { + if o.Node != nil { + if err := o.Node.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getNodeOk" + "." + "node") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getNodeOk" + "." + "node") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeOKBody) UnmarshalBinary(b []byte) error { + var res GetNodeOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +GetNodeOKBodyNode get node OK body node +swagger:model GetNodeOKBodyNode +*/ +type GetNodeOKBodyNode struct { + // Unique Node identifier. + NodeID string `json:"node_id,omitempty"` + + // Node type. + NodeType string `json:"node_type,omitempty"` + + // User-defined node name. + NodeName string `json:"node_name,omitempty"` + + // Linux machine-id. + MachineID string `json:"machine_id,omitempty"` + + // Linux distribution name and version. + Distro string `json:"distro,omitempty"` + + // Node model. + NodeModel string `json:"node_model,omitempty"` + + // A node's unique docker container identifier. + ContainerID string `json:"container_id,omitempty"` + + // Container name. + ContainerName string `json:"container_name,omitempty"` + + // Node address (DNS name or IP). + Address string `json:"address,omitempty"` + + // Node region. + Region string `json:"region,omitempty"` + + // Node availability zone. + Az string `json:"az,omitempty"` + + // Custom user-assigned labels for Node. + CustomLabels map[string]string `json:"custom_labels,omitempty"` + + // Creation timestamp. 
+ // Format: date-time + CreatedAt strfmt.DateTime `json:"created_at,omitempty"` + + // Last update timestamp. + // Format: date-time + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty"` + + // Node status. + // + // - STATUS_INVALID: Invalid status. + // - UP: The node is up. + // - DOWN: The node is down. + // - UNKNOWN: The node's status cannot be known (e.g. there are no metrics yet). + // Enum: [STATUS_INVALID UP DOWN UNKNOWN] + Status *string `json:"status,omitempty"` + + // List of agents related to this node. + Agents []*GetNodeOKBodyNodeAgentsItems0 `json:"agents"` + + // List of services running on this node. + Services []*GetNodeOKBodyNodeServicesItems0 `json:"services"` +} + +// Validate validates this get node OK body node +func (o *GetNodeOKBodyNode) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateCreatedAt(formats); err != nil { + res = append(res, err) + } + + if err := o.validateUpdatedAt(formats); err != nil { + res = append(res, err) + } + + if err := o.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := o.validateAgents(formats); err != nil { + res = append(res, err) + } + + if err := o.validateServices(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetNodeOKBodyNode) validateCreatedAt(formats strfmt.Registry) error { + if swag.IsZero(o.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("getNodeOk"+"."+"node"+"."+"created_at", "body", "date-time", o.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (o *GetNodeOKBodyNode) validateUpdatedAt(formats strfmt.Registry) error { + if swag.IsZero(o.UpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("getNodeOk"+"."+"node"+"."+"updated_at", "body", "date-time", o.UpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +var getNodeOkBodyNodeTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STATUS_INVALID","UP","DOWN","UNKNOWN"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + getNodeOkBodyNodeTypeStatusPropEnum = append(getNodeOkBodyNodeTypeStatusPropEnum, v) + } +} + +const ( + + // GetNodeOKBodyNodeStatusSTATUSINVALID captures enum value "STATUS_INVALID" + GetNodeOKBodyNodeStatusSTATUSINVALID string = "STATUS_INVALID" + + // GetNodeOKBodyNodeStatusUP captures enum value "UP" + GetNodeOKBodyNodeStatusUP string = "UP" + + // GetNodeOKBodyNodeStatusDOWN captures enum value "DOWN" + GetNodeOKBodyNodeStatusDOWN string = "DOWN" + + // GetNodeOKBodyNodeStatusUNKNOWN captures enum value "UNKNOWN" + GetNodeOKBodyNodeStatusUNKNOWN string = "UNKNOWN" +) + +// prop value enum +func (o *GetNodeOKBodyNode) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, getNodeOkBodyNodeTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (o *GetNodeOKBodyNode) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(o.Status) { // not required + return nil + } + + // value enum + if err := o.validateStatusEnum("getNodeOk"+"."+"node"+"."+"status", "body", *o.Status); err != nil { 
+ return err + } + + return nil +} + +func (o *GetNodeOKBodyNode) validateAgents(formats strfmt.Registry) error { + if swag.IsZero(o.Agents) { // not required + return nil + } + + for i := 0; i < len(o.Agents); i++ { + if swag.IsZero(o.Agents[i]) { // not required + continue + } + + if o.Agents[i] != nil { + if err := o.Agents[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getNodeOk" + "." + "node" + "." + "agents" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getNodeOk" + "." + "node" + "." + "agents" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (o *GetNodeOKBodyNode) validateServices(formats strfmt.Registry) error { + if swag.IsZero(o.Services) { // not required + return nil + } + + for i := 0; i < len(o.Services); i++ { + if swag.IsZero(o.Services[i]) { // not required + continue + } + + if o.Services[i] != nil { + if err := o.Services[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getNodeOk" + "." + "node" + "." + "services" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getNodeOk" + "." + "node" + "." + "services" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this get node OK body node based on the context it is used +func (o *GetNodeOKBodyNode) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateAgents(ctx, formats); err != nil { + res = append(res, err) + } + + if err := o.contextValidateServices(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetNodeOKBodyNode) contextValidateAgents(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Agents); i++ { + if o.Agents[i] != nil { + if err := o.Agents[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getNodeOk" + "." + "node" + "." + "agents" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getNodeOk" + "." + "node" + "." + "agents" + "." + strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +func (o *GetNodeOKBodyNode) contextValidateServices(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Services); i++ { + if o.Services[i] != nil { + if err := o.Services[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getNodeOk" + "." + "node" + "." + "services" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getNodeOk" + "." + "node" + "." + "services" + "." + strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeOKBodyNode) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeOKBodyNode) UnmarshalBinary(b []byte) error { + var res GetNodeOKBodyNode + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +GetNodeOKBodyNodeAgentsItems0 get node OK body node agents items0 +swagger:model GetNodeOKBodyNodeAgentsItems0 +*/ +type GetNodeOKBodyNodeAgentsItems0 struct { + // Unique Agent identifier. + AgentID string `json:"agent_id,omitempty"` + + // Agent type. + AgentType string `json:"agent_type,omitempty"` + + // Actual Agent status. 
+ Status string `json:"status,omitempty"` + + // True if Agent is running and connected to pmm-managed. + IsConnected bool `json:"is_connected,omitempty"` +} + +// Validate validates this get node OK body node agents items0 +func (o *GetNodeOKBodyNodeAgentsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get node OK body node agents items0 based on context it is used +func (o *GetNodeOKBodyNodeAgentsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeOKBodyNodeAgentsItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeOKBodyNodeAgentsItems0) UnmarshalBinary(b []byte) error { + var res GetNodeOKBodyNodeAgentsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +GetNodeOKBodyNodeServicesItems0 Service represents a service running on a node. +swagger:model GetNodeOKBodyNodeServicesItems0 +*/ +type GetNodeOKBodyNodeServicesItems0 struct { + // Unique Service identifier. + ServiceID string `json:"service_id,omitempty"` + + // Service type. + ServiceType string `json:"service_type,omitempty"` + + // Service name. 
+ ServiceName string `json:"service_name,omitempty"` +} + +// Validate validates this get node OK body node services items0 +func (o *GetNodeOKBodyNodeServicesItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get node OK body node services items0 based on context it is used +func (o *GetNodeOKBodyNodeServicesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetNodeOKBodyNodeServicesItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetNodeOKBodyNodeServicesItems0) UnmarshalBinary(b []byte) error { + var res GetNodeOKBodyNodeServicesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/api/managementpb/node/json/client/mgmt_node/list_nodes_parameters.go b/api/managementpb/node/json/client/mgmt_node/list_nodes_parameters.go new file mode 100644 index 0000000000..b60cd62ce8 --- /dev/null +++ b/api/managementpb/node/json/client/mgmt_node/list_nodes_parameters.go @@ -0,0 +1,144 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package mgmt_node + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewListNodesParams creates a new ListNodesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. 
+func NewListNodesParams() *ListNodesParams { + return &ListNodesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewListNodesParamsWithTimeout creates a new ListNodesParams object +// with the ability to set a timeout on a request. +func NewListNodesParamsWithTimeout(timeout time.Duration) *ListNodesParams { + return &ListNodesParams{ + timeout: timeout, + } +} + +// NewListNodesParamsWithContext creates a new ListNodesParams object +// with the ability to set a context for a request. +func NewListNodesParamsWithContext(ctx context.Context) *ListNodesParams { + return &ListNodesParams{ + Context: ctx, + } +} + +// NewListNodesParamsWithHTTPClient creates a new ListNodesParams object +// with the ability to set a custom HTTPClient for a request. +func NewListNodesParamsWithHTTPClient(client *http.Client) *ListNodesParams { + return &ListNodesParams{ + HTTPClient: client, + } +} + +/* +ListNodesParams contains all the parameters to send to the API endpoint + + for the list nodes operation. + + Typically these are written to a http.Request. +*/ +type ListNodesParams struct { + // Body. + Body ListNodesBody + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the list nodes params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ListNodesParams) WithDefaults() *ListNodesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the list nodes params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ListNodesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the list nodes params +func (o *ListNodesParams) WithTimeout(timeout time.Duration) *ListNodesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the list nodes params +func (o *ListNodesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the list nodes params +func (o *ListNodesParams) WithContext(ctx context.Context) *ListNodesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the list nodes params +func (o *ListNodesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the list nodes params +func (o *ListNodesParams) WithHTTPClient(client *http.Client) *ListNodesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the list nodes params +func (o *ListNodesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the list nodes params +func (o *ListNodesParams) WithBody(body ListNodesBody) *ListNodesParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the list nodes params +func (o *ListNodesParams) SetBody(body ListNodesBody) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ListNodesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/api/managementpb/node/json/client/mgmt_node/list_nodes_responses.go b/api/managementpb/node/json/client/mgmt_node/list_nodes_responses.go new file mode 100644 index 0000000000..26974fdbac --- /dev/null +++ b/api/managementpb/node/json/client/mgmt_node/list_nodes_responses.go @@ -0,0 +1,839 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package mgmt_node + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ListNodesReader is a Reader for the ListNodes structure. +type ListNodesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ListNodesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewListNodesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewListNodesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewListNodesOK creates a ListNodesOK with default headers values +func NewListNodesOK() *ListNodesOK { + return &ListNodesOK{} +} + +/* +ListNodesOK describes a response with status code 200, with default header values. + +A successful response. 
+*/ +type ListNodesOK struct { + Payload *ListNodesOKBody +} + +func (o *ListNodesOK) Error() string { + return fmt.Sprintf("[POST /v1/management/Node/List][%d] listNodesOk %+v", 200, o.Payload) +} + +func (o *ListNodesOK) GetPayload() *ListNodesOKBody { + return o.Payload +} + +func (o *ListNodesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(ListNodesOKBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewListNodesDefault creates a ListNodesDefault with default headers values +func NewListNodesDefault(code int) *ListNodesDefault { + return &ListNodesDefault{ + _statusCode: code, + } +} + +/* +ListNodesDefault describes a response with status code -1, with default header values. + +An unexpected error response. +*/ +type ListNodesDefault struct { + _statusCode int + + Payload *ListNodesDefaultBody +} + +// Code gets the status code for the list nodes default response +func (o *ListNodesDefault) Code() int { + return o._statusCode +} + +func (o *ListNodesDefault) Error() string { + return fmt.Sprintf("[POST /v1/management/Node/List][%d] ListNodes default %+v", o._statusCode, o.Payload) +} + +func (o *ListNodesDefault) GetPayload() *ListNodesDefaultBody { + return o.Payload +} + +func (o *ListNodesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(ListNodesDefaultBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +/* +ListNodesBody list nodes body +swagger:model ListNodesBody +*/ +type ListNodesBody struct { + // NodeType describes supported Node types. 
+ // Enum: [NODE_TYPE_INVALID GENERIC_NODE CONTAINER_NODE REMOTE_NODE REMOTE_RDS_NODE REMOTE_AZURE_DATABASE_NODE] + NodeType *string `json:"node_type,omitempty"` +} + +// Validate validates this list nodes body +func (o *ListNodesBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateNodeType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var listNodesBodyTypeNodeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["NODE_TYPE_INVALID","GENERIC_NODE","CONTAINER_NODE","REMOTE_NODE","REMOTE_RDS_NODE","REMOTE_AZURE_DATABASE_NODE"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + listNodesBodyTypeNodeTypePropEnum = append(listNodesBodyTypeNodeTypePropEnum, v) + } +} + +const ( + + // ListNodesBodyNodeTypeNODETYPEINVALID captures enum value "NODE_TYPE_INVALID" + ListNodesBodyNodeTypeNODETYPEINVALID string = "NODE_TYPE_INVALID" + + // ListNodesBodyNodeTypeGENERICNODE captures enum value "GENERIC_NODE" + ListNodesBodyNodeTypeGENERICNODE string = "GENERIC_NODE" + + // ListNodesBodyNodeTypeCONTAINERNODE captures enum value "CONTAINER_NODE" + ListNodesBodyNodeTypeCONTAINERNODE string = "CONTAINER_NODE" + + // ListNodesBodyNodeTypeREMOTENODE captures enum value "REMOTE_NODE" + ListNodesBodyNodeTypeREMOTENODE string = "REMOTE_NODE" + + // ListNodesBodyNodeTypeREMOTERDSNODE captures enum value "REMOTE_RDS_NODE" + ListNodesBodyNodeTypeREMOTERDSNODE string = "REMOTE_RDS_NODE" + + // ListNodesBodyNodeTypeREMOTEAZUREDATABASENODE captures enum value "REMOTE_AZURE_DATABASE_NODE" + ListNodesBodyNodeTypeREMOTEAZUREDATABASENODE string = "REMOTE_AZURE_DATABASE_NODE" +) + +// prop value enum +func (o *ListNodesBody) validateNodeTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, listNodesBodyTypeNodeTypePropEnum, true); err != nil { + 
return err + } + return nil +} + +func (o *ListNodesBody) validateNodeType(formats strfmt.Registry) error { + if swag.IsZero(o.NodeType) { // not required + return nil + } + + // value enum + if err := o.validateNodeTypeEnum("body"+"."+"node_type", "body", *o.NodeType); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this list nodes body based on context it is used +func (o *ListNodesBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesBody) UnmarshalBinary(b []byte) error { + var res ListNodesBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListNodesDefaultBody list nodes default body +swagger:model ListNodesDefaultBody +*/ +type ListNodesDefaultBody struct { + // code + Code int32 `json:"code,omitempty"` + + // message + Message string `json:"message,omitempty"` + + // details + Details []*ListNodesDefaultBodyDetailsItems0 `json:"details"` +} + +// Validate validates this list nodes default body +func (o *ListNodesDefaultBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateDetails(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListNodesDefaultBody) validateDetails(formats strfmt.Registry) error { + if swag.IsZero(o.Details) { // not required + return nil + } + + for i := 0; i < len(o.Details); i++ { + if swag.IsZero(o.Details[i]) { // not required + continue + } + + if o.Details[i] != nil { + if err := o.Details[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ListNodes default" + "." + "details" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("ListNodes default" + "." + "details" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this list nodes default body based on the context it is used +func (o *ListNodesDefaultBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateDetails(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListNodesDefaultBody) contextValidateDetails(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Details); i++ { + if o.Details[i] != nil { + if err := o.Details[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ListNodes default" + "." + "details" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("ListNodes default" + "." + "details" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesDefaultBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesDefaultBody) UnmarshalBinary(b []byte) error { + var res ListNodesDefaultBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListNodesDefaultBodyDetailsItems0 list nodes default body details items0 +swagger:model ListNodesDefaultBodyDetailsItems0 +*/ +type ListNodesDefaultBodyDetailsItems0 struct { + // at type + AtType string `json:"@type,omitempty"` +} + +// Validate validates this list nodes default body details items0 +func (o *ListNodesDefaultBodyDetailsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this list nodes default body details items0 based on context it is used +func (o *ListNodesDefaultBodyDetailsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesDefaultBodyDetailsItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesDefaultBodyDetailsItems0) UnmarshalBinary(b []byte) error { + var res ListNodesDefaultBodyDetailsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListNodesOKBody list nodes OK body +swagger:model ListNodesOKBody +*/ +type ListNodesOKBody struct { + // nodes + Nodes []*ListNodesOKBodyNodesItems0 `json:"nodes"` +} + +// Validate validates this list nodes OK body +func (o *ListNodesOKBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateNodes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + 
return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListNodesOKBody) validateNodes(formats strfmt.Registry) error { + if swag.IsZero(o.Nodes) { // not required + return nil + } + + for i := 0; i < len(o.Nodes); i++ { + if swag.IsZero(o.Nodes[i]) { // not required + continue + } + + if o.Nodes[i] != nil { + if err := o.Nodes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("listNodesOk" + "." + "nodes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("listNodesOk" + "." + "nodes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this list nodes OK body based on the context it is used +func (o *ListNodesOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateNodes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListNodesOKBody) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Nodes); i++ { + if o.Nodes[i] != nil { + if err := o.Nodes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("listNodesOk" + "." + "nodes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("listNodesOk" + "." + "nodes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesOKBody) UnmarshalBinary(b []byte) error { + var res ListNodesOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListNodesOKBodyNodesItems0 list nodes OK body nodes items0 +swagger:model ListNodesOKBodyNodesItems0 +*/ +type ListNodesOKBodyNodesItems0 struct { + // Unique Node identifier. + NodeID string `json:"node_id,omitempty"` + + // Node type. + NodeType string `json:"node_type,omitempty"` + + // User-defined node name. + NodeName string `json:"node_name,omitempty"` + + // Linux machine-id. + MachineID string `json:"machine_id,omitempty"` + + // Linux distribution name and version. + Distro string `json:"distro,omitempty"` + + // Node model. + NodeModel string `json:"node_model,omitempty"` + + // A node's unique docker container identifier. + ContainerID string `json:"container_id,omitempty"` + + // Container name. + ContainerName string `json:"container_name,omitempty"` + + // Node address (DNS name or IP). + Address string `json:"address,omitempty"` + + // Node region. + Region string `json:"region,omitempty"` + + // Node availability zone. + Az string `json:"az,omitempty"` + + // Custom user-assigned labels for Node. + CustomLabels map[string]string `json:"custom_labels,omitempty"` + + // Creation timestamp. + // Format: date-time + CreatedAt strfmt.DateTime `json:"created_at,omitempty"` + + // Last update timestamp. + // Format: date-time + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty"` + + // Node status. + // + // - STATUS_INVALID: Invalid status. + // - UP: The node is up. + // - DOWN: The node is down. + // - UNKNOWN: The node's status cannot be known (e.g. there are no metrics yet). 
+ // Enum: [STATUS_INVALID UP DOWN UNKNOWN] + Status *string `json:"status,omitempty"` + + // List of agents related to this node. + Agents []*ListNodesOKBodyNodesItems0AgentsItems0 `json:"agents"` + + // List of services running on this node. + Services []*ListNodesOKBodyNodesItems0ServicesItems0 `json:"services"` +} + +// Validate validates this list nodes OK body nodes items0 +func (o *ListNodesOKBodyNodesItems0) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateCreatedAt(formats); err != nil { + res = append(res, err) + } + + if err := o.validateUpdatedAt(formats); err != nil { + res = append(res, err) + } + + if err := o.validateStatus(formats); err != nil { + res = append(res, err) + } + + if err := o.validateAgents(formats); err != nil { + res = append(res, err) + } + + if err := o.validateServices(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListNodesOKBodyNodesItems0) validateCreatedAt(formats strfmt.Registry) error { + if swag.IsZero(o.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("created_at", "body", "date-time", o.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (o *ListNodesOKBodyNodesItems0) validateUpdatedAt(formats strfmt.Registry) error { + if swag.IsZero(o.UpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("updated_at", "body", "date-time", o.UpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +var listNodesOkBodyNodesItems0TypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["STATUS_INVALID","UP","DOWN","UNKNOWN"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + listNodesOkBodyNodesItems0TypeStatusPropEnum = append(listNodesOkBodyNodesItems0TypeStatusPropEnum, v) + } +} + +const ( + + // 
ListNodesOKBodyNodesItems0StatusSTATUSINVALID captures enum value "STATUS_INVALID" + ListNodesOKBodyNodesItems0StatusSTATUSINVALID string = "STATUS_INVALID" + + // ListNodesOKBodyNodesItems0StatusUP captures enum value "UP" + ListNodesOKBodyNodesItems0StatusUP string = "UP" + + // ListNodesOKBodyNodesItems0StatusDOWN captures enum value "DOWN" + ListNodesOKBodyNodesItems0StatusDOWN string = "DOWN" + + // ListNodesOKBodyNodesItems0StatusUNKNOWN captures enum value "UNKNOWN" + ListNodesOKBodyNodesItems0StatusUNKNOWN string = "UNKNOWN" +) + +// prop value enum +func (o *ListNodesOKBodyNodesItems0) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, listNodesOkBodyNodesItems0TypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (o *ListNodesOKBodyNodesItems0) validateStatus(formats strfmt.Registry) error { + if swag.IsZero(o.Status) { // not required + return nil + } + + // value enum + if err := o.validateStatusEnum("status", "body", *o.Status); err != nil { + return err + } + + return nil +} + +func (o *ListNodesOKBodyNodesItems0) validateAgents(formats strfmt.Registry) error { + if swag.IsZero(o.Agents) { // not required + return nil + } + + for i := 0; i < len(o.Agents); i++ { + if swag.IsZero(o.Agents[i]) { // not required + continue + } + + if o.Agents[i] != nil { + if err := o.Agents[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("agents" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("agents" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (o *ListNodesOKBodyNodesItems0) validateServices(formats strfmt.Registry) error { + if swag.IsZero(o.Services) { // not required + return nil + } + + for i := 0; i < len(o.Services); i++ { + if swag.IsZero(o.Services[i]) { // not required + continue + } + + if o.Services[i] != nil { + if err := o.Services[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("services" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("services" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this list nodes OK body nodes items0 based on the context it is used +func (o *ListNodesOKBodyNodesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := o.contextValidateAgents(ctx, formats); err != nil { + res = append(res, err) + } + + if err := o.contextValidateServices(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *ListNodesOKBodyNodesItems0) contextValidateAgents(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Agents); i++ { + if o.Agents[i] != nil { + if err := o.Agents[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("agents" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("agents" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +func (o *ListNodesOKBodyNodesItems0) contextValidateServices(ctx context.Context, formats strfmt.Registry) error { + for i := 0; i < len(o.Services); i++ { + if o.Services[i] != nil { + if err := o.Services[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("services" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("services" + "." + strconv.Itoa(i)) + } + return err + } + } + } + + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesOKBodyNodesItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesOKBodyNodesItems0) UnmarshalBinary(b []byte) error { + var res ListNodesOKBodyNodesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListNodesOKBodyNodesItems0AgentsItems0 list nodes OK body nodes items0 agents items0 +swagger:model ListNodesOKBodyNodesItems0AgentsItems0 +*/ +type ListNodesOKBodyNodesItems0AgentsItems0 struct { + // Unique Agent identifier. + AgentID string `json:"agent_id,omitempty"` + + // Agent type. + AgentType string `json:"agent_type,omitempty"` + + // Actual Agent status. + Status string `json:"status,omitempty"` + + // True if Agent is running and connected to pmm-managed. 
+ IsConnected bool `json:"is_connected,omitempty"` +} + +// Validate validates this list nodes OK body nodes items0 agents items0 +func (o *ListNodesOKBodyNodesItems0AgentsItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this list nodes OK body nodes items0 agents items0 based on context it is used +func (o *ListNodesOKBodyNodesItems0AgentsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesOKBodyNodesItems0AgentsItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesOKBodyNodesItems0AgentsItems0) UnmarshalBinary(b []byte) error { + var res ListNodesOKBodyNodesItems0AgentsItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ListNodesOKBodyNodesItems0ServicesItems0 Service represents a service running on a node. +swagger:model ListNodesOKBodyNodesItems0ServicesItems0 +*/ +type ListNodesOKBodyNodesItems0ServicesItems0 struct { + // Unique Service identifier. + ServiceID string `json:"service_id,omitempty"` + + // Service type. + ServiceType string `json:"service_type,omitempty"` + + // Service name. 
+ ServiceName string `json:"service_name,omitempty"` +} + +// Validate validates this list nodes OK body nodes items0 services items0 +func (o *ListNodesOKBodyNodesItems0ServicesItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this list nodes OK body nodes items0 services items0 based on context it is used +func (o *ListNodesOKBodyNodesItems0ServicesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ListNodesOKBodyNodesItems0ServicesItems0) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ListNodesOKBodyNodesItems0ServicesItems0) UnmarshalBinary(b []byte) error { + var res ListNodesOKBodyNodesItems0ServicesItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/api/managementpb/node/json/client/mgmt_node/mgmt_node_client.go b/api/managementpb/node/json/client/mgmt_node/mgmt_node_client.go new file mode 100644 index 0000000000..e758676185 --- /dev/null +++ b/api/managementpb/node/json/client/mgmt_node/mgmt_node_client.go @@ -0,0 +1,119 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package mgmt_node + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new mgmt node API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for mgmt node API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientOption is the option for Client methods +type ClientOption func(*runtime.ClientOperation) + +// ClientService is the interface for Client methods +type ClientService interface { + GetNode(params *GetNodeParams, opts ...ClientOption) (*GetNodeOK, error) + + ListNodes(params *ListNodesParams, opts ...ClientOption) (*ListNodesOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +GetNode gets node + +Returns a single Node by ID. +*/ +func (a *Client) GetNode(params *GetNodeParams, opts ...ClientOption) (*GetNodeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetNodeParams() + } + op := &runtime.ClientOperation{ + ID: "GetNode", + Method: "POST", + PathPattern: "/v1/management/Node/Get", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http", "https"}, + Params: params, + Reader: &GetNodeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetNodeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*GetNodeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ListNodes lists nodes + +Returns a filtered list of Nodes. 
+*/ +func (a *Client) ListNodes(params *ListNodesParams, opts ...ClientOption) (*ListNodesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewListNodesParams() + } + op := &runtime.ClientOperation{ + ID: "ListNodes", + Method: "POST", + PathPattern: "/v1/management/Node/List", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http", "https"}, + Params: params, + Reader: &ListNodesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ListNodesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ListNodesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/api/managementpb/node/json/client/pmm_management_node_api_client.go b/api/managementpb/node/json/client/pmm_management_node_api_client.go new file mode 100644 index 0000000000..ad800373cd --- /dev/null +++ b/api/managementpb/node/json/client/pmm_management_node_api_client.go @@ -0,0 +1,112 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/percona/pmm/api/managementpb/node/json/client/mgmt_node" +) + +// Default PMM management node API HTTP client. 
+var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "localhost" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http", "https"} + +// NewHTTPClient creates a new PMM management node API HTTP client. +func NewHTTPClient(formats strfmt.Registry) *PMMManagementNodeAPI { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new PMM management node API HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *PMMManagementNodeAPI { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new PMM management node API client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *PMMManagementNodeAPI { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(PMMManagementNodeAPI) + cli.Transport = transport + cli.MgmtNode = mgmt_node.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. 
+type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// PMMManagementNodeAPI is a client for PMM management node API +type PMMManagementNodeAPI struct { + MgmtNode mgmt_node.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *PMMManagementNodeAPI) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.MgmtNode.SetTransport(transport) +} diff --git a/api/managementpb/node/json/header.json b/api/managementpb/node/json/header.json new file mode 100644 index 0000000000..fad8ce9208 --- /dev/null +++ b/api/managementpb/node/json/header.json @@ -0,0 +1,8 @@ +{ + "swagger": "2.0", + "info": { + "title": "PMM Management Node API", + "version": "v1beta1" + }, + "schemes": ["https", "http"] +} diff --git a/api/managementpb/node/json/node.json b/api/managementpb/node/json/node.json new file mode 100644 index 0000000000..04534b6078 --- /dev/null +++ b/api/managementpb/node/json/node.json @@ -0,0 +1,470 @@ +{ + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "https", + "http" + ], + "swagger": "2.0", + "info": { + "title": "PMM Management Node API", + "version": "v1beta1" + }, + "paths": { + "/v1/management/Node/Get": { + "post": { + "description": 
"Returns a single Node by ID.", + "tags": [ + "MgmtNode" + ], + "summary": "Get Node", + "operationId": "GetNode", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "node_id": { + "description": "Unique Node identifier.", + "type": "string", + "x-order": 0 + } + } + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "node": { + "type": "object", + "properties": { + "address": { + "description": "Node address (DNS name or IP).", + "type": "string", + "x-order": 8 + }, + "agents": { + "description": "List of agents related to this node.", + "type": "array", + "items": { + "type": "object", + "properties": { + "agent_id": { + "description": "Unique Agent identifier.", + "type": "string", + "x-order": 0 + }, + "agent_type": { + "description": "Agent type.", + "type": "string", + "x-order": 1 + }, + "is_connected": { + "description": "True if Agent is running and connected to pmm-managed.", + "type": "boolean", + "x-order": 3 + }, + "status": { + "description": "Actual Agent status.", + "type": "string", + "x-order": 2 + } + } + }, + "x-order": 15 + }, + "az": { + "description": "Node availability zone.", + "type": "string", + "x-order": 10 + }, + "container_id": { + "description": "A node's unique docker container identifier.", + "type": "string", + "x-order": 6 + }, + "container_name": { + "description": "Container name.", + "type": "string", + "x-order": 7 + }, + "created_at": { + "description": "Creation timestamp.", + "type": "string", + "format": "date-time", + "x-order": 12 + }, + "custom_labels": { + "description": "Custom user-assigned labels for Node.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-order": 11 + }, + "distro": { + "description": "Linux distribution name and version.", + "type": "string", + "x-order": 4 + }, + "machine_id": { + "description": "Linux 
machine-id.", + "type": "string", + "x-order": 3 + }, + "node_id": { + "description": "Unique Node identifier.", + "type": "string", + "x-order": 0 + }, + "node_model": { + "description": "Node model.", + "type": "string", + "x-order": 5 + }, + "node_name": { + "description": "User-defined node name.", + "type": "string", + "x-order": 2 + }, + "node_type": { + "description": "Node type.", + "type": "string", + "x-order": 1 + }, + "region": { + "description": "Node region.", + "type": "string", + "x-order": 9 + }, + "services": { + "description": "List of services running on this node.", + "type": "array", + "items": { + "description": "Service represents a service running on a node.", + "type": "object", + "properties": { + "service_id": { + "description": "Unique Service identifier.", + "type": "string", + "x-order": 0 + }, + "service_name": { + "description": "Service name.", + "type": "string", + "x-order": 2 + }, + "service_type": { + "description": "Service type.", + "type": "string", + "x-order": 1 + } + } + }, + "x-order": 16 + }, + "status": { + "description": "Node status.\n\n - STATUS_INVALID: Invalid status.\n - UP: The node is up.\n - DOWN: The node is down.\n - UNKNOWN: The node's status cannot be known (e.g. 
there are no metrics yet).", + "type": "string", + "default": "STATUS_INVALID", + "enum": [ + "STATUS_INVALID", + "UP", + "DOWN", + "UNKNOWN" + ], + "x-order": 14 + }, + "updated_at": { + "description": "Last update timestamp.", + "type": "string", + "format": "date-time", + "x-order": 13 + } + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "x-order": 0 + } + }, + "additionalProperties": false + }, + "x-order": 2 + }, + "message": { + "type": "string", + "x-order": 1 + } + } + } + } + } + } + }, + "/v1/management/Node/List": { + "post": { + "description": "Returns a filtered list of Nodes.", + "tags": [ + "MgmtNode" + ], + "summary": "List Nodes", + "operationId": "ListNodes", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "node_type": { + "description": "NodeType describes supported Node types.", + "type": "string", + "default": "NODE_TYPE_INVALID", + "enum": [ + "NODE_TYPE_INVALID", + "GENERIC_NODE", + "CONTAINER_NODE", + "REMOTE_NODE", + "REMOTE_RDS_NODE", + "REMOTE_AZURE_DATABASE_NODE" + ], + "x-order": 0 + } + } + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "nodes": { + "type": "array", + "items": { + "type": "object", + "properties": { + "address": { + "description": "Node address (DNS name or IP).", + "type": "string", + "x-order": 8 + }, + "agents": { + "description": "List of agents related to this node.", + "type": "array", + "items": { + "type": "object", + "properties": { + "agent_id": { + "description": "Unique Agent identifier.", + "type": "string", + "x-order": 0 + }, + "agent_type": { 
+ "description": "Agent type.", + "type": "string", + "x-order": 1 + }, + "is_connected": { + "description": "True if Agent is running and connected to pmm-managed.", + "type": "boolean", + "x-order": 3 + }, + "status": { + "description": "Actual Agent status.", + "type": "string", + "x-order": 2 + } + } + }, + "x-order": 15 + }, + "az": { + "description": "Node availability zone.", + "type": "string", + "x-order": 10 + }, + "container_id": { + "description": "A node's unique docker container identifier.", + "type": "string", + "x-order": 6 + }, + "container_name": { + "description": "Container name.", + "type": "string", + "x-order": 7 + }, + "created_at": { + "description": "Creation timestamp.", + "type": "string", + "format": "date-time", + "x-order": 12 + }, + "custom_labels": { + "description": "Custom user-assigned labels for Node.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-order": 11 + }, + "distro": { + "description": "Linux distribution name and version.", + "type": "string", + "x-order": 4 + }, + "machine_id": { + "description": "Linux machine-id.", + "type": "string", + "x-order": 3 + }, + "node_id": { + "description": "Unique Node identifier.", + "type": "string", + "x-order": 0 + }, + "node_model": { + "description": "Node model.", + "type": "string", + "x-order": 5 + }, + "node_name": { + "description": "User-defined node name.", + "type": "string", + "x-order": 2 + }, + "node_type": { + "description": "Node type.", + "type": "string", + "x-order": 1 + }, + "region": { + "description": "Node region.", + "type": "string", + "x-order": 9 + }, + "services": { + "description": "List of services running on this node.", + "type": "array", + "items": { + "description": "Service represents a service running on a node.", + "type": "object", + "properties": { + "service_id": { + "description": "Unique Service identifier.", + "type": "string", + "x-order": 0 + }, + "service_name": { + "description": "Service name.", + "type": 
"string", + "x-order": 2 + }, + "service_type": { + "description": "Service type.", + "type": "string", + "x-order": 1 + } + } + }, + "x-order": 16 + }, + "status": { + "description": "Node status.\n\n - STATUS_INVALID: Invalid status.\n - UP: The node is up.\n - DOWN: The node is down.\n - UNKNOWN: The node's status cannot be known (e.g. there are no metrics yet).", + "type": "string", + "default": "STATUS_INVALID", + "enum": [ + "STATUS_INVALID", + "UP", + "DOWN", + "UNKNOWN" + ], + "x-order": 14 + }, + "updated_at": { + "description": "Last update timestamp.", + "type": "string", + "format": "date-time", + "x-order": 13 + } + } + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "x-order": 0 + } + }, + "additionalProperties": false + }, + "x-order": 2 + }, + "message": { + "type": "string", + "x-order": 1 + } + } + } + } + } + } + } + }, + "tags": [ + { + "name": "MgmtNode" + } + ] +} \ No newline at end of file diff --git a/api/managementpb/node/node.pb.go b/api/managementpb/node/node.pb.go new file mode 100644 index 0000000000..0bb7976ee1 --- /dev/null +++ b/api/managementpb/node/node.pb.go @@ -0,0 +1,910 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: managementpb/node/node.proto + +package nodev1beta1 + +import ( + reflect "reflect" + sync "sync" + + _ "github.com/envoyproxy/protoc-gen-validate/validate" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + + inventorypb "github.com/percona/pmm/api/inventorypb" + _ "github.com/percona/pmm/api/managementpb/agent" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Node status. +type UniversalNode_Status int32 + +const ( + // Invalid status. + UniversalNode_STATUS_INVALID UniversalNode_Status = 0 + // The node is up. + UniversalNode_UP UniversalNode_Status = 1 + // The node is down. + UniversalNode_DOWN UniversalNode_Status = 2 + // The node's status cannot be known (e.g. there are no metrics yet). + UniversalNode_UNKNOWN UniversalNode_Status = 3 +) + +// Enum value maps for UniversalNode_Status. 
+var ( + UniversalNode_Status_name = map[int32]string{ + 0: "STATUS_INVALID", + 1: "UP", + 2: "DOWN", + 3: "UNKNOWN", + } + UniversalNode_Status_value = map[string]int32{ + "STATUS_INVALID": 0, + "UP": 1, + "DOWN": 2, + "UNKNOWN": 3, + } +) + +func (x UniversalNode_Status) Enum() *UniversalNode_Status { + p := new(UniversalNode_Status) + *p = x + return p +} + +func (x UniversalNode_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UniversalNode_Status) Descriptor() protoreflect.EnumDescriptor { + return file_managementpb_node_node_proto_enumTypes[0].Descriptor() +} + +func (UniversalNode_Status) Type() protoreflect.EnumType { + return &file_managementpb_node_node_proto_enumTypes[0] +} + +func (x UniversalNode_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UniversalNode_Status.Descriptor instead. +func (UniversalNode_Status) EnumDescriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{0, 0} +} + +type UniversalNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique Node identifier. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Node type. + NodeType string `protobuf:"bytes,2,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"` + // User-defined node name. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // Linux machine-id. + MachineId string `protobuf:"bytes,4,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` + // Linux distribution name and version. + Distro string `protobuf:"bytes,5,opt,name=distro,proto3" json:"distro,omitempty"` + // Node model. 
+ NodeModel string `protobuf:"bytes,6,opt,name=node_model,json=nodeModel,proto3" json:"node_model,omitempty"` + // A node's unique docker container identifier. + ContainerId string `protobuf:"bytes,7,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Container name. + ContainerName string `protobuf:"bytes,8,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + // Node address (DNS name or IP). + Address string `protobuf:"bytes,9,opt,name=address,proto3" json:"address,omitempty"` + // Node region. + Region string `protobuf:"bytes,10,opt,name=region,proto3" json:"region,omitempty"` + // Node availability zone. + Az string `protobuf:"bytes,11,opt,name=az,proto3" json:"az,omitempty"` + // Custom user-assigned labels for Node. + CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Creation timestamp. + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Last update timestamp. + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + // The health status of the node. + Status UniversalNode_Status `protobuf:"varint,15,opt,name=status,proto3,enum=node.v1beta1.UniversalNode_Status" json:"status,omitempty"` + // List of agents related to this node. + Agents []*UniversalNode_Agent `protobuf:"bytes,16,rep,name=agents,proto3" json:"agents,omitempty"` + // List of services running on this node. 
+ Services []*UniversalNode_Service `protobuf:"bytes,17,rep,name=services,proto3" json:"services,omitempty"` +} + +func (x *UniversalNode) Reset() { + *x = UniversalNode{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UniversalNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UniversalNode) ProtoMessage() {} + +func (x *UniversalNode) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UniversalNode.ProtoReflect.Descriptor instead. +func (*UniversalNode) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{0} +} + +func (x *UniversalNode) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *UniversalNode) GetNodeType() string { + if x != nil { + return x.NodeType + } + return "" +} + +func (x *UniversalNode) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + +func (x *UniversalNode) GetMachineId() string { + if x != nil { + return x.MachineId + } + return "" +} + +func (x *UniversalNode) GetDistro() string { + if x != nil { + return x.Distro + } + return "" +} + +func (x *UniversalNode) GetNodeModel() string { + if x != nil { + return x.NodeModel + } + return "" +} + +func (x *UniversalNode) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} + +func (x *UniversalNode) GetContainerName() string { + if x != nil { + return x.ContainerName + } + return "" +} + +func (x *UniversalNode) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *UniversalNode) GetRegion() 
string { + if x != nil { + return x.Region + } + return "" +} + +func (x *UniversalNode) GetAz() string { + if x != nil { + return x.Az + } + return "" +} + +func (x *UniversalNode) GetCustomLabels() map[string]string { + if x != nil { + return x.CustomLabels + } + return nil +} + +func (x *UniversalNode) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *UniversalNode) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *UniversalNode) GetStatus() UniversalNode_Status { + if x != nil { + return x.Status + } + return UniversalNode_STATUS_INVALID +} + +func (x *UniversalNode) GetAgents() []*UniversalNode_Agent { + if x != nil { + return x.Agents + } + return nil +} + +func (x *UniversalNode) GetServices() []*UniversalNode_Service { + if x != nil { + return x.Services + } + return nil +} + +type ListNodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node type to be filtered out. 
+ NodeType inventorypb.NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.NodeType" json:"node_type,omitempty"` +} + +func (x *ListNodeRequest) Reset() { + *x = ListNodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNodeRequest) ProtoMessage() {} + +func (x *ListNodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNodeRequest.ProtoReflect.Descriptor instead. +func (*ListNodeRequest) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{1} +} + +func (x *ListNodeRequest) GetNodeType() inventorypb.NodeType { + if x != nil { + return x.NodeType + } + return inventorypb.NodeType(0) +} + +type ListNodeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nodes []*UniversalNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` +} + +func (x *ListNodeResponse) Reset() { + *x = ListNodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNodeResponse) ProtoMessage() {} + +func (x *ListNodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNodeResponse.ProtoReflect.Descriptor instead. +func (*ListNodeResponse) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{2} +} + +func (x *ListNodeResponse) GetNodes() []*UniversalNode { + if x != nil { + return x.Nodes + } + return nil +} + +type GetNodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique Node identifier. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (x *GetNodeRequest) Reset() { + *x = GetNodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNodeRequest) ProtoMessage() {} + +func (x *GetNodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNodeRequest.ProtoReflect.Descriptor instead. 
+func (*GetNodeRequest) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{3} +} + +func (x *GetNodeRequest) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +type GetNodeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Node *UniversalNode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` +} + +func (x *GetNodeResponse) Reset() { + *x = GetNodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNodeResponse) ProtoMessage() {} + +func (x *GetNodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNodeResponse.ProtoReflect.Descriptor instead. +func (*GetNodeResponse) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{4} +} + +func (x *GetNodeResponse) GetNode() *UniversalNode { + if x != nil { + return x.Node + } + return nil +} + +// Service represents a service running on a node. +type UniversalNode_Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique Service identifier. + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Service type. + ServiceType string `protobuf:"bytes,2,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + // Service name. 
+ ServiceName string `protobuf:"bytes,3,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *UniversalNode_Service) Reset() { + *x = UniversalNode_Service{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UniversalNode_Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UniversalNode_Service) ProtoMessage() {} + +func (x *UniversalNode_Service) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UniversalNode_Service.ProtoReflect.Descriptor instead. +func (*UniversalNode_Service) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *UniversalNode_Service) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *UniversalNode_Service) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *UniversalNode_Service) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +type UniversalNode_Agent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique Agent identifier. + AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + // Agent type. + AgentType string `protobuf:"bytes,2,opt,name=agent_type,json=agentType,proto3" json:"agent_type,omitempty"` + // Actual Agent status. + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + // True if Agent is running and connected to pmm-managed. 
+ IsConnected bool `protobuf:"varint,4,opt,name=is_connected,json=isConnected,proto3" json:"is_connected,omitempty"` +} + +func (x *UniversalNode_Agent) Reset() { + *x = UniversalNode_Agent{} + if protoimpl.UnsafeEnabled { + mi := &file_managementpb_node_node_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UniversalNode_Agent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UniversalNode_Agent) ProtoMessage() {} + +func (x *UniversalNode_Agent) ProtoReflect() protoreflect.Message { + mi := &file_managementpb_node_node_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UniversalNode_Agent.ProtoReflect.Descriptor instead. +func (*UniversalNode_Agent) Descriptor() ([]byte, []int) { + return file_managementpb_node_node_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *UniversalNode_Agent) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +func (x *UniversalNode_Agent) GetAgentType() string { + if x != nil { + return x.AgentType + } + return "" +} + +func (x *UniversalNode_Agent) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *UniversalNode_Agent) GetIsConnected() bool { + if x != nil { + return x.IsConnected + } + return false +} + +var File_managementpb_node_node_proto protoreflect.FileDescriptor + +var file_managementpb_node_node_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x6e, + 0x6f, 0x64, 0x65, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 
0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x69, 0x6e, 0x76, + 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x70, 0x62, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, + 0x70, 0x62, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x08, 0x0a, 0x0d, 0x55, 0x6e, 0x69, 0x76, + 0x65, 0x72, 0x73, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x69, 0x73, 0x74, 0x72, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x73, + 0x74, 0x72, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x0e, + 0x0a, 0x02, 0x61, 0x7a, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x61, 0x7a, 0x12, 0x52, + 0x0a, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x4e, 0x6f, + 0x64, 0x65, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, + 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x10, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x4e, 0x6f, 0x64, + 0x65, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x12, + 0x3f, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x1a, 0x6e, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, + 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7c, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x69, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x69, 0x73, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x1a, 0x3f, + 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x3b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x06, 0x0a, + 0x02, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, + 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x03, 0x22, 0x43, 0x0a, 0x0f, + 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x30, 0x0a, 0x09, 0x6e, 
0x6f, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x45, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x4e, 0x6f, 0x64, + 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x29, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, + 0x65, 0x49, 0x64, 0x22, 0x42, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x4e, 0x6f, 0x64, + 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x32, 0xc6, 0x02, 0x0a, 0x08, 0x4d, 0x67, 0x6d, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0xa1, 0x01, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x12, 0x1d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x55, 0x92, 0x41, 0x2f, 0x12, 0x0a, 0x4c, 0x69, 
0x73, 0x74, 0x20, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x1a, 0x21, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x20, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x2e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x3a, 0x01, 0x2a, 0x22, 0x18, + 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x4e, + 0x6f, 0x64, 0x65, 0x2f, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x07, 0x47, 0x65, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x4d, 0x92, 0x41, 0x28, 0x12, 0x08, 0x47, 0x65, 0x74, 0x20, 0x4e, 0x6f, 0x64, 0x65, + 0x1a, 0x1c, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x20, 0x73, 0x69, 0x6e, 0x67, + 0x6c, 0x65, 0x20, 0x4e, 0x6f, 0x64, 0x65, 0x20, 0x62, 0x79, 0x20, 0x49, 0x44, 0x2e, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x22, 0x17, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x4e, 0x6f, 0x64, 0x65, 0x2f, 0x47, 0x65, 0x74, + 0x42, 0xa8, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x09, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, + 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x70, 0x62, 0x2f, 0x6e, 0x6f, 0x64, 0x65, + 0x3b, 0x6e, 0x6f, 0x64, 0x65, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x4e, + 
0x58, 0x58, 0xaa, 0x02, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0xca, 0x02, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0xe2, 0x02, 0x18, 0x4e, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x4e, 0x6f, + 0x64, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_managementpb_node_node_proto_rawDescOnce sync.Once + file_managementpb_node_node_proto_rawDescData = file_managementpb_node_node_proto_rawDesc +) + +func file_managementpb_node_node_proto_rawDescGZIP() []byte { + file_managementpb_node_node_proto_rawDescOnce.Do(func() { + file_managementpb_node_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_managementpb_node_node_proto_rawDescData) + }) + return file_managementpb_node_node_proto_rawDescData +} + +var ( + file_managementpb_node_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1) + file_managementpb_node_node_proto_msgTypes = make([]protoimpl.MessageInfo, 8) + file_managementpb_node_node_proto_goTypes = []interface{}{ + (UniversalNode_Status)(0), // 0: node.v1beta1.UniversalNode.Status + (*UniversalNode)(nil), // 1: node.v1beta1.UniversalNode + (*ListNodeRequest)(nil), // 2: node.v1beta1.ListNodeRequest + (*ListNodeResponse)(nil), // 3: node.v1beta1.ListNodeResponse + (*GetNodeRequest)(nil), // 4: node.v1beta1.GetNodeRequest + (*GetNodeResponse)(nil), // 5: node.v1beta1.GetNodeResponse + (*UniversalNode_Service)(nil), // 6: node.v1beta1.UniversalNode.Service + (*UniversalNode_Agent)(nil), // 7: node.v1beta1.UniversalNode.Agent + nil, // 8: node.v1beta1.UniversalNode.CustomLabelsEntry + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp + (inventorypb.NodeType)(0), // 10: inventory.NodeType + } +) + +var file_managementpb_node_node_proto_depIdxs = []int32{ + 8, // 0: 
node.v1beta1.UniversalNode.custom_labels:type_name -> node.v1beta1.UniversalNode.CustomLabelsEntry + 9, // 1: node.v1beta1.UniversalNode.created_at:type_name -> google.protobuf.Timestamp + 9, // 2: node.v1beta1.UniversalNode.updated_at:type_name -> google.protobuf.Timestamp + 0, // 3: node.v1beta1.UniversalNode.status:type_name -> node.v1beta1.UniversalNode.Status + 7, // 4: node.v1beta1.UniversalNode.agents:type_name -> node.v1beta1.UniversalNode.Agent + 6, // 5: node.v1beta1.UniversalNode.services:type_name -> node.v1beta1.UniversalNode.Service + 10, // 6: node.v1beta1.ListNodeRequest.node_type:type_name -> inventory.NodeType + 1, // 7: node.v1beta1.ListNodeResponse.nodes:type_name -> node.v1beta1.UniversalNode + 1, // 8: node.v1beta1.GetNodeResponse.node:type_name -> node.v1beta1.UniversalNode + 2, // 9: node.v1beta1.MgmtNode.ListNodes:input_type -> node.v1beta1.ListNodeRequest + 4, // 10: node.v1beta1.MgmtNode.GetNode:input_type -> node.v1beta1.GetNodeRequest + 3, // 11: node.v1beta1.MgmtNode.ListNodes:output_type -> node.v1beta1.ListNodeResponse + 5, // 12: node.v1beta1.MgmtNode.GetNode:output_type -> node.v1beta1.GetNodeResponse + 11, // [11:13] is the sub-list for method output_type + 9, // [9:11] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_managementpb_node_node_proto_init() } +func file_managementpb_node_node_proto_init() { + if File_managementpb_node_node_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_managementpb_node_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UniversalNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_node_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch 
v := v.(*ListNodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_node_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_node_node_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_node_node_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_node_node_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UniversalNode_Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_managementpb_node_node_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UniversalNode_Agent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_managementpb_node_node_proto_rawDesc, + NumEnums: 1, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_managementpb_node_node_proto_goTypes, + DependencyIndexes: file_managementpb_node_node_proto_depIdxs, + EnumInfos: file_managementpb_node_node_proto_enumTypes, + MessageInfos: 
file_managementpb_node_node_proto_msgTypes, + }.Build() + File_managementpb_node_node_proto = out.File + file_managementpb_node_node_proto_rawDesc = nil + file_managementpb_node_node_proto_goTypes = nil + file_managementpb_node_node_proto_depIdxs = nil +} diff --git a/api/managementpb/node/node.pb.gw.go b/api/managementpb/node/node.pb.gw.go new file mode 100644 index 0000000000..13c3ff3c92 --- /dev/null +++ b/api/managementpb/node/node.pb.gw.go @@ -0,0 +1,248 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: managementpb/node/node.proto + +/* +Package nodev1beta1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package nodev1beta1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var ( + _ codes.Code + _ io.Reader + _ status.Status + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join +) + +func request_MgmtNode_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client MgmtNodeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListNodeRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func 
local_request_MgmtNode_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server MgmtNodeServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListNodeRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListNodes(ctx, &protoReq) + return msg, metadata, err +} + +func request_MgmtNode_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, client MgmtNodeClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetNodeRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_MgmtNode_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, server MgmtNodeServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetNodeRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetNode(ctx, &protoReq) + return msg, metadata, err +} + +// RegisterMgmtNodeHandlerServer registers the http handlers for service MgmtNode to "mux". +// UnaryRPC :call MgmtNodeServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMgmtNodeHandlerFromEndpoint instead. +func RegisterMgmtNodeHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MgmtNodeServer) error { + mux.Handle("POST", pattern_MgmtNode_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/node.v1beta1.MgmtNode/ListNodes", runtime.WithHTTPPathPattern("/v1/management/Node/List")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MgmtNode_ListNodes_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MgmtNode_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + + mux.Handle("POST", pattern_MgmtNode_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/node.v1beta1.MgmtNode/GetNode", runtime.WithHTTPPathPattern("/v1/management/Node/Get")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MgmtNode_GetNode_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MgmtNode_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + return nil +} + +// RegisterMgmtNodeHandlerFromEndpoint is same as RegisterMgmtNodeHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMgmtNodeHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMgmtNodeHandler(ctx, mux, conn) +} + +// RegisterMgmtNodeHandler registers the http handlers for service MgmtNode to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMgmtNodeHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMgmtNodeHandlerClient(ctx, mux, NewMgmtNodeClient(conn)) +} + +// RegisterMgmtNodeHandlerClient registers the http handlers for service MgmtNode +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MgmtNodeClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MgmtNodeClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MgmtNodeClient" to call the correct interceptors. 
+func RegisterMgmtNodeHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MgmtNodeClient) error { + mux.Handle("POST", pattern_MgmtNode_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/node.v1beta1.MgmtNode/ListNodes", runtime.WithHTTPPathPattern("/v1/management/Node/List")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MgmtNode_ListNodes_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MgmtNode_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + + mux.Handle("POST", pattern_MgmtNode_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/node.v1beta1.MgmtNode/GetNode", runtime.WithHTTPPathPattern("/v1/management/Node/Get")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MgmtNode_GetNode_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MgmtNode_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + return nil +} + +var ( + pattern_MgmtNode_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "management", "Node", "List"}, "")) + + pattern_MgmtNode_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "management", "Node", "Get"}, "")) +) + +var ( + forward_MgmtNode_ListNodes_0 = runtime.ForwardResponseMessage + + forward_MgmtNode_GetNode_0 = runtime.ForwardResponseMessage +) diff --git a/api/managementpb/node/node.pb.validate.go b/api/managementpb/node/node.pb.validate.go new file mode 100644 index 0000000000..b1bd76f536 --- /dev/null +++ b/api/managementpb/node/node.pb.validate.go @@ -0,0 +1,977 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: managementpb/node/node.proto + +package nodev1beta1 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + inventorypb "github.com/percona/pmm/api/inventorypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = inventorypb.NodeType(0) +) + +// Validate checks the field values on UniversalNode with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *UniversalNode) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UniversalNode with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in UniversalNodeMultiError, or +// nil if none found. 
+func (m *UniversalNode) ValidateAll() error { + return m.validate(true) +} + +func (m *UniversalNode) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NodeId + + // no validation rules for NodeType + + // no validation rules for NodeName + + // no validation rules for MachineId + + // no validation rules for Distro + + // no validation rules for NodeModel + + // no validation rules for ContainerId + + // no validation rules for ContainerName + + // no validation rules for Address + + // no validation rules for Region + + // no validation rules for Az + + // no validation rules for CustomLabels + + if all { + switch v := interface{}(m.GetCreatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UniversalNodeValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpdatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if 
v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UniversalNodeValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Status + + for idx, item := range m.GetAgents() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: fmt.Sprintf("Agents[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: fmt.Sprintf("Agents[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UniversalNodeValidationError{ + field: fmt.Sprintf("Agents[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetServices() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UniversalNodeValidationError{ + field: fmt.Sprintf("Services[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UniversalNodeValidationError{ + field: fmt.Sprintf("Services[%v]", 
idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return UniversalNodeMultiError(errors) + } + + return nil +} + +// UniversalNodeMultiError is an error wrapping multiple validation errors +// returned by UniversalNode.ValidateAll() if the designated constraints +// aren't met. +type UniversalNodeMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UniversalNodeMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UniversalNodeMultiError) AllErrors() []error { return m } + +// UniversalNodeValidationError is the validation error returned by +// UniversalNode.Validate if the designated constraints aren't met. +type UniversalNodeValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UniversalNodeValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UniversalNodeValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UniversalNodeValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UniversalNodeValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UniversalNodeValidationError) ErrorName() string { return "UniversalNodeValidationError" } + +// Error satisfies the builtin error interface +func (e UniversalNodeValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUniversalNode.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UniversalNodeValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UniversalNodeValidationError{} + +// Validate checks the field values on ListNodeRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ListNodeRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListNodeRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListNodeRequestMultiError, or nil if none found. +func (m *ListNodeRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ListNodeRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NodeType + + if len(errors) > 0 { + return ListNodeRequestMultiError(errors) + } + + return nil +} + +// ListNodeRequestMultiError is an error wrapping multiple validation errors +// returned by ListNodeRequest.ValidateAll() if the designated constraints +// aren't met. +type ListNodeRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ListNodeRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListNodeRequestMultiError) AllErrors() []error { return m } + +// ListNodeRequestValidationError is the validation error returned by +// ListNodeRequest.Validate if the designated constraints aren't met. +type ListNodeRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListNodeRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListNodeRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListNodeRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListNodeRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListNodeRequestValidationError) ErrorName() string { return "ListNodeRequestValidationError" } + +// Error satisfies the builtin error interface +func (e ListNodeRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListNodeRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListNodeRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListNodeRequestValidationError{} + +// Validate checks the field values on ListNodeResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *ListNodeResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListNodeResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListNodeResponseMultiError, or nil if none found. +func (m *ListNodeResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ListNodeResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetNodes() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListNodeResponseValidationError{ + field: fmt.Sprintf("Nodes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListNodeResponseValidationError{ + field: fmt.Sprintf("Nodes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListNodeResponseValidationError{ + field: fmt.Sprintf("Nodes[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListNodeResponseMultiError(errors) + } + + return nil +} + +// ListNodeResponseMultiError is an error wrapping multiple validation errors +// returned by ListNodeResponse.ValidateAll() if the designated constraints +// aren't met. +type ListNodeResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ListNodeResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListNodeResponseMultiError) AllErrors() []error { return m } + +// ListNodeResponseValidationError is the validation error returned by +// ListNodeResponse.Validate if the designated constraints aren't met. +type ListNodeResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListNodeResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListNodeResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListNodeResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListNodeResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListNodeResponseValidationError) ErrorName() string { return "ListNodeResponseValidationError" } + +// Error satisfies the builtin error interface +func (e ListNodeResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListNodeResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListNodeResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListNodeResponseValidationError{} + +// Validate checks the field values on GetNodeRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *GetNodeRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetNodeRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GetNodeRequestMultiError, +// or nil if none found. +func (m *GetNodeRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetNodeRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NodeId + + if len(errors) > 0 { + return GetNodeRequestMultiError(errors) + } + + return nil +} + +// GetNodeRequestMultiError is an error wrapping multiple validation errors +// returned by GetNodeRequest.ValidateAll() if the designated constraints +// aren't met. +type GetNodeRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetNodeRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetNodeRequestMultiError) AllErrors() []error { return m } + +// GetNodeRequestValidationError is the validation error returned by +// GetNodeRequest.Validate if the designated constraints aren't met. +type GetNodeRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetNodeRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetNodeRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetNodeRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetNodeRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetNodeRequestValidationError) ErrorName() string { return "GetNodeRequestValidationError" } + +// Error satisfies the builtin error interface +func (e GetNodeRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetNodeRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetNodeRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetNodeRequestValidationError{} + +// Validate checks the field values on GetNodeResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetNodeResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetNodeResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetNodeResponseMultiError, or nil if none found. 
+func (m *GetNodeResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetNodeResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetNode()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetNodeResponseValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetNodeResponseValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetNodeResponseValidationError{ + field: "Node", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetNodeResponseMultiError(errors) + } + + return nil +} + +// GetNodeResponseMultiError is an error wrapping multiple validation errors +// returned by GetNodeResponse.ValidateAll() if the designated constraints +// aren't met. +type GetNodeResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetNodeResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetNodeResponseMultiError) AllErrors() []error { return m } + +// GetNodeResponseValidationError is the validation error returned by +// GetNodeResponse.Validate if the designated constraints aren't met. +type GetNodeResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GetNodeResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetNodeResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetNodeResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetNodeResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetNodeResponseValidationError) ErrorName() string { return "GetNodeResponseValidationError" } + +// Error satisfies the builtin error interface +func (e GetNodeResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetNodeResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetNodeResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetNodeResponseValidationError{} + +// Validate checks the field values on UniversalNode_Service with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UniversalNode_Service) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UniversalNode_Service with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UniversalNode_ServiceMultiError, or nil if none found. 
+func (m *UniversalNode_Service) ValidateAll() error { + return m.validate(true) +} + +func (m *UniversalNode_Service) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ServiceId + + // no validation rules for ServiceType + + // no validation rules for ServiceName + + if len(errors) > 0 { + return UniversalNode_ServiceMultiError(errors) + } + + return nil +} + +// UniversalNode_ServiceMultiError is an error wrapping multiple validation +// errors returned by UniversalNode_Service.ValidateAll() if the designated +// constraints aren't met. +type UniversalNode_ServiceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UniversalNode_ServiceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UniversalNode_ServiceMultiError) AllErrors() []error { return m } + +// UniversalNode_ServiceValidationError is the validation error returned by +// UniversalNode_Service.Validate if the designated constraints aren't met. +type UniversalNode_ServiceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UniversalNode_ServiceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UniversalNode_ServiceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UniversalNode_ServiceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UniversalNode_ServiceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UniversalNode_ServiceValidationError) ErrorName() string { + return "UniversalNode_ServiceValidationError" +} + +// Error satisfies the builtin error interface +func (e UniversalNode_ServiceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUniversalNode_Service.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UniversalNode_ServiceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UniversalNode_ServiceValidationError{} + +// Validate checks the field values on UniversalNode_Agent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UniversalNode_Agent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UniversalNode_Agent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UniversalNode_AgentMultiError, or nil if none found. +func (m *UniversalNode_Agent) ValidateAll() error { + return m.validate(true) +} + +func (m *UniversalNode_Agent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AgentId + + // no validation rules for AgentType + + // no validation rules for Status + + // no validation rules for IsConnected + + if len(errors) > 0 { + return UniversalNode_AgentMultiError(errors) + } + + return nil +} + +// UniversalNode_AgentMultiError is an error wrapping multiple validation +// errors returned by UniversalNode_Agent.ValidateAll() if the designated +// constraints aren't met. 
+type UniversalNode_AgentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UniversalNode_AgentMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UniversalNode_AgentMultiError) AllErrors() []error { return m } + +// UniversalNode_AgentValidationError is the validation error returned by +// UniversalNode_Agent.Validate if the designated constraints aren't met. +type UniversalNode_AgentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UniversalNode_AgentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UniversalNode_AgentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UniversalNode_AgentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UniversalNode_AgentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UniversalNode_AgentValidationError) ErrorName() string { + return "UniversalNode_AgentValidationError" +} + +// Error satisfies the builtin error interface +func (e UniversalNode_AgentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUniversalNode_Agent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UniversalNode_AgentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UniversalNode_AgentValidationError{} diff --git a/api/managementpb/node/node.proto b/api/managementpb/node/node.proto new file mode 100644 index 0000000000..faf37d49f9 --- /dev/null +++ b/api/managementpb/node/node.proto @@ -0,0 +1,129 @@ +syntax = "proto3"; + +package node.v1beta1; + +import "google/api/annotations.proto"; +import "google/protobuf/timestamp.proto"; +import "inventorypb/agents.proto"; +import "inventorypb/nodes.proto"; +import "managementpb/agent/agent.proto"; +import "protoc-gen-openapiv2/options/annotations.proto"; +import "validate/validate.proto"; + +option go_package = "api/managementpb/node;nodev1beta1"; + +message UniversalNode { + // Node status. + enum Status { + // Invalid status. + STATUS_INVALID = 0; + // The node is up. + UP = 1; + // The node is down. + DOWN = 2; + // The node's status cannot be known (e.g. there are no metrics yet). + UNKNOWN = 3; + } + // Service represents a service running on a node. + message Service { + // Unique Service identifier. + string service_id = 1; + // Service type. + string service_type = 2; + // Service name. + string service_name = 3; + } + + message Agent { + // Unique Agent identifier. + string agent_id = 1; + // Agent type. + string agent_type = 2; + // Actual Agent status. + string status = 3; + // True if Agent is running and connected to pmm-managed. 
+ bool is_connected = 4; + } + + // Unique Node identifier. + string node_id = 1; + // Node type. + string node_type = 2; + // User-defined node name. + string node_name = 3; + // Linux machine-id. + string machine_id = 4; + // Linux distribution name and version. + string distro = 5; + // Node model. + string node_model = 6; + // A node's unique docker container identifier. + string container_id = 7; + // Container name. + string container_name = 8; + // Node address (DNS name or IP). + string address = 9; + // Node region. + string region = 10; + // Node availability zone. + string az = 11; + // Custom user-assigned labels for Node. + map custom_labels = 12; + // Creation timestamp. + google.protobuf.Timestamp created_at = 13; + // Last update timestamp. + google.protobuf.Timestamp updated_at = 14; + // The health status of the node. + Status status = 15; + // List of agents related to this node. + repeated Agent agents = 16; + // List of services running on this node. + repeated Service services = 17; +} + +message ListNodeRequest { + // Node type to be filtered out. + inventory.NodeType node_type = 1; +} + +message ListNodeResponse { + repeated UniversalNode nodes = 1; +} + +message GetNodeRequest { + // Unique Node identifier. + string node_id = 1; +} + +message GetNodeResponse { + UniversalNode node = 1; +} + +// NOTE: the GA version of this API will be integrated into managementpb/node.proto. +// all `/v1/management/Node/...` endpoints are singular, which follows the convention of that API. + +// MgmtNode service provides public Management API methods for Nodes. +service MgmtNode { + // ListNode returns a list of nodes. + rpc ListNodes(ListNodeRequest) returns (ListNodeResponse) { + option (google.api.http) = { + post: "/v1/management/Node/List" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + summary: "List Nodes" + description: "Returns a filtered list of Nodes." 
+ }; + } + // GetNode returns a single Node by ID. + rpc GetNode(GetNodeRequest) returns (GetNodeResponse) { + option (google.api.http) = { + post: "/v1/management/Node/Get" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + summary: "Get Node" + description: "Returns a single Node by ID." + }; + } +} diff --git a/api/managementpb/node/node_grpc.pb.go b/api/managementpb/node/node_grpc.pb.go new file mode 100644 index 0000000000..14e4f461d1 --- /dev/null +++ b/api/managementpb/node/node_grpc.pb.go @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: managementpb/node/node.proto + +package nodev1beta1 + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + MgmtNode_ListNodes_FullMethodName = "/node.v1beta1.MgmtNode/ListNodes" + MgmtNode_GetNode_FullMethodName = "/node.v1beta1.MgmtNode/GetNode" +) + +// MgmtNodeClient is the client API for MgmtNode service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MgmtNodeClient interface { + // ListNode returns a list of nodes. + ListNodes(ctx context.Context, in *ListNodeRequest, opts ...grpc.CallOption) (*ListNodeResponse, error) + // GetNode returns a single Node by ID. 
+ GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) +} + +type mgmtNodeClient struct { + cc grpc.ClientConnInterface +} + +func NewMgmtNodeClient(cc grpc.ClientConnInterface) MgmtNodeClient { + return &mgmtNodeClient{cc} +} + +func (c *mgmtNodeClient) ListNodes(ctx context.Context, in *ListNodeRequest, opts ...grpc.CallOption) (*ListNodeResponse, error) { + out := new(ListNodeResponse) + err := c.cc.Invoke(ctx, MgmtNode_ListNodes_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mgmtNodeClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { + out := new(GetNodeResponse) + err := c.cc.Invoke(ctx, MgmtNode_GetNode_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MgmtNodeServer is the server API for MgmtNode service. +// All implementations must embed UnimplementedMgmtNodeServer +// for forward compatibility +type MgmtNodeServer interface { + // ListNode returns a list of nodes. + ListNodes(context.Context, *ListNodeRequest) (*ListNodeResponse, error) + // GetNode returns a single Node by ID. + GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) + mustEmbedUnimplementedMgmtNodeServer() +} + +// UnimplementedMgmtNodeServer must be embedded to have forward compatible implementations. 
+type UnimplementedMgmtNodeServer struct{} + +func (UnimplementedMgmtNodeServer) ListNodes(context.Context, *ListNodeRequest) (*ListNodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented") +} + +func (UnimplementedMgmtNodeServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNode not implemented") +} +func (UnimplementedMgmtNodeServer) mustEmbedUnimplementedMgmtNodeServer() {} + +// UnsafeMgmtNodeServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MgmtNodeServer will +// result in compilation errors. +type UnsafeMgmtNodeServer interface { + mustEmbedUnimplementedMgmtNodeServer() +} + +func RegisterMgmtNodeServer(s grpc.ServiceRegistrar, srv MgmtNodeServer) { + s.RegisterService(&MgmtNode_ServiceDesc, srv) +} + +func _MgmtNode_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MgmtNodeServer).ListNodes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MgmtNode_ListNodes_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MgmtNodeServer).ListNodes(ctx, req.(*ListNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MgmtNode_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MgmtNodeServer).GetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MgmtNode_GetNode_FullMethodName, + 
} + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MgmtNodeServer).GetNode(ctx, req.(*GetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// MgmtNode_ServiceDesc is the grpc.ServiceDesc for MgmtNode service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MgmtNode_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "node.v1beta1.MgmtNode", + HandlerType: (*MgmtNodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNodes", + Handler: _MgmtNode_ListNodes_Handler, + }, + { + MethodName: "GetNode", + Handler: _MgmtNode_GetNode_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "managementpb/node/node.proto", +} diff --git a/api/managementpb/service.pb.validate.go b/api/managementpb/service.pb.validate.go index fe524f5253..806763f70f 100644 --- a/api/managementpb/service.pb.validate.go +++ b/api/managementpb/service.pb.validate.go @@ -36,7 +36,7 @@ var ( _ = anypb.Any{} _ = sort.Sort - _ = inventorypb.ServiceType(0) + _ = inventorypb.NodeType(0) ) // Validate checks the field values on AddNodeParams with the rules defined in diff --git a/api/managementpb/service/json/client/mgmt_service/list_services_responses.go b/api/managementpb/service/json/client/mgmt_service/list_services_responses.go index 2385bf22df..14dc7e9404 100644 --- a/api/managementpb/service/json/client/mgmt_service/list_services_responses.go +++ b/api/managementpb/service/json/client/mgmt_service/list_services_responses.go @@ -535,7 +535,7 @@ type ListServicesOKBodyServicesItems0 struct { // - STATUS_INVALID: In case we don't support the db vendor yet. // - UP: The service is up. // - DOWN: The service is down. - // - UNKNOWN: The service's status cannot be known (p.e. there are no metrics yet). + // - UNKNOWN: The service's status cannot be known (e.g. there are no metrics yet). 
// Enum: [STATUS_INVALID UP DOWN UNKNOWN] Status *string `json:"status,omitempty"` } @@ -718,7 +718,7 @@ ListServicesOKBodyServicesItems0AgentsItems0 list services OK body services item swagger:model ListServicesOKBodyServicesItems0AgentsItems0 */ type ListServicesOKBodyServicesItems0AgentsItems0 struct { - // Unique randomly generated instance identifier. + // Unique agent identifier. AgentID string `json:"agent_id,omitempty"` // True if the agent password is set. diff --git a/api/managementpb/service/json/service.json b/api/managementpb/service/json/service.json index b8f059e9fb..6455fde1e3 100644 --- a/api/managementpb/service/json/service.json +++ b/api/managementpb/service/json/service.json @@ -84,7 +84,7 @@ "type": "object", "properties": { "agent_id": { - "description": "Unique randomly generated instance identifier.", + "description": "Unique agent identifier.", "type": "string", "x-order": 0 }, @@ -469,7 +469,7 @@ "x-order": 13 }, "status": { - "description": "Service status.\n\n - STATUS_INVALID: In case we don't support the db vendor yet.\n - UP: The service is up.\n - DOWN: The service is down.\n - UNKNOWN: The service's status cannot be known (p.e. there are no metrics yet).", + "description": "Service status.\n\n - STATUS_INVALID: In case we don't support the db vendor yet.\n - UP: The service is up.\n - DOWN: The service is down.\n - UNKNOWN: The service's status cannot be known (e.g. there are no metrics yet).", "type": "string", "default": "STATUS_INVALID", "enum": [ diff --git a/api/managementpb/service/service.pb.go b/api/managementpb/service/service.pb.go index 75409b10f1..7626c4440b 100644 --- a/api/managementpb/service/service.pb.go +++ b/api/managementpb/service/service.pb.go @@ -37,7 +37,7 @@ const ( UniversalService_UP UniversalService_Status = 1 // The service is down. UniversalService_DOWN UniversalService_Status = 2 - // The service's status cannot be known (p.e. there are no metrics yet). + // The service's status cannot be known (e.g. 
there are no metrics yet). UniversalService_UNKNOWN UniversalService_Status = 3 ) diff --git a/api/managementpb/service/service.proto b/api/managementpb/service/service.proto index 6bc11afd18..218551fa62 100644 --- a/api/managementpb/service/service.proto +++ b/api/managementpb/service/service.proto @@ -20,7 +20,7 @@ message UniversalService { UP = 1; // The service is down. DOWN = 2; - // The service's status cannot be known (p.e. there are no metrics yet). + // The service's status cannot be known (e.g. there are no metrics yet). UNKNOWN = 3; } @@ -79,8 +79,8 @@ message ListServiceResponse { repeated UniversalService services = 1; } -// NOTE: the GA version of this API is will be integrated into managementpb/service.proto. -// Therefore, `/v1/management/Service/List` is intentionally singular. +// NOTE: the GA version of this API will be integrated into managementpb/service.proto. +// `/v1/management/Service/List` is singular, which follows the convention of that API. // MgmtService service provides public methods for managing and querying Services. service MgmtService { diff --git a/api/qanpb/json/client/object_details/get_metrics_responses.go b/api/qanpb/json/client/object_details/get_metrics_responses.go index 5416259ce0..23bcd854fc 100644 --- a/api/qanpb/json/client/object_details/get_metrics_responses.go +++ b/api/qanpb/json/client/object_details/get_metrics_responses.go @@ -429,6 +429,9 @@ type GetMetricsOKBody struct { // fingerprint Fingerprint string `json:"fingerprint,omitempty"` + + // metadata + Metadata *GetMetricsOKBodyMetadata `json:"metadata,omitempty"` } // Validate validates this get metrics OK body @@ -447,6 +450,10 @@ func (o *GetMetricsOKBody) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := o.validateMetadata(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -531,6 +538,25 @@ func (o *GetMetricsOKBody) validateTotals(formats strfmt.Registry) error { return nil } +func (o *GetMetricsOKBody) validateMetadata(formats strfmt.Registry) error { + if swag.IsZero(o.Metadata) { // not required + return nil + } + + if o.Metadata != nil { + if err := o.Metadata.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getMetricsOk" + "." + "metadata") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getMetricsOk" + "." + "metadata") + } + return err + } + } + + return nil +} + // ContextValidate validate this get metrics OK body based on the context it is used func (o *GetMetricsOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -547,6 +573,10 @@ func (o *GetMetricsOKBody) ContextValidate(ctx context.Context, formats strfmt.R res = append(res, err) } + if err := o.contextValidateMetadata(ctx, formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -594,6 +624,21 @@ func (o *GetMetricsOKBody) contextValidateTotals(ctx context.Context, formats st return nil } +func (o *GetMetricsOKBody) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { + if o.Metadata != nil { + if err := o.Metadata.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("getMetricsOk" + "." + "metadata") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("getMetricsOk" + "." 
+ "metadata") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (o *GetMetricsOKBody) MarshalBinary() ([]byte, error) { if o == nil { @@ -612,6 +657,76 @@ func (o *GetMetricsOKBody) UnmarshalBinary(b []byte) error { return nil } +/* +GetMetricsOKBodyMetadata GetSlecetedQueryMetadataReply consists selected query metadata to show in details for given query ID. +swagger:model GetMetricsOKBodyMetadata +*/ +type GetMetricsOKBodyMetadata struct { + // service name + ServiceName string `json:"service_name,omitempty"` + + // database + Database string `json:"database,omitempty"` + + // schema + Schema string `json:"schema,omitempty"` + + // username + Username string `json:"username,omitempty"` + + // replication set + ReplicationSet string `json:"replication_set,omitempty"` + + // cluster + Cluster string `json:"cluster,omitempty"` + + // service type + ServiceType string `json:"service_type,omitempty"` + + // service id + ServiceID string `json:"service_id,omitempty"` + + // environment + Environment string `json:"environment,omitempty"` + + // node id + NodeID string `json:"node_id,omitempty"` + + // node name + NodeName string `json:"node_name,omitempty"` + + // node type + NodeType string `json:"node_type,omitempty"` +} + +// Validate validates this get metrics OK body metadata +func (o *GetMetricsOKBodyMetadata) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get metrics OK body metadata based on context it is used +func (o *GetMetricsOKBodyMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetMetricsOKBodyMetadata) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetMetricsOKBodyMetadata) UnmarshalBinary(b []byte) error { + var res GetMetricsOKBodyMetadata + if err := 
swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + /* GetMetricsOKBodyMetricsAnon MetricValues is statistics of specific metric. swagger:model GetMetricsOKBodyMetricsAnon diff --git a/api/qanpb/json/qanpb.json b/api/qanpb/json/qanpb.json index f535bebfc8..3a17f462ec 100644 --- a/api/qanpb/json/qanpb.json +++ b/api/qanpb/json/qanpb.json @@ -1232,6 +1232,61 @@ "type": "string", "x-order": 4 }, + "metadata": { + "description": "GetSlecetedQueryMetadataReply consists selected query metadata to show in details for given query ID.", + "type": "object", + "properties": { + "cluster": { + "type": "string", + "x-order": 5 + }, + "database": { + "type": "string", + "x-order": 1 + }, + "environment": { + "type": "string", + "x-order": 8 + }, + "node_id": { + "type": "string", + "x-order": 9 + }, + "node_name": { + "type": "string", + "x-order": 10 + }, + "node_type": { + "type": "string", + "x-order": 11 + }, + "replication_set": { + "type": "string", + "x-order": 4 + }, + "schema": { + "type": "string", + "x-order": 2 + }, + "service_id": { + "type": "string", + "x-order": 7 + }, + "service_name": { + "type": "string", + "x-order": 0 + }, + "service_type": { + "type": "string", + "x-order": 6 + }, + "username": { + "type": "string", + "x-order": 3 + } + }, + "x-order": 5 + }, "metrics": { "type": "object", "additionalProperties": { diff --git a/api/qanpb/object_details.pb.go b/api/qanpb/object_details.pb.go index d0727409fc..e747337976 100644 --- a/api/qanpb/object_details.pb.go +++ b/api/qanpb/object_details.pb.go @@ -129,11 +129,12 @@ type MetricsReply struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Metrics map[string]*MetricValues `protobuf:"bytes,3,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - TextMetrics map[string]string `protobuf:"bytes,7,rep,name=text_metrics,json=textMetrics,proto3" 
json:"text_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Sparkline []*Point `protobuf:"bytes,4,rep,name=sparkline,proto3" json:"sparkline,omitempty"` - Totals map[string]*MetricValues `protobuf:"bytes,5,rep,name=totals,proto3" json:"totals,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Fingerprint string `protobuf:"bytes,6,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + Metrics map[string]*MetricValues `protobuf:"bytes,3,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TextMetrics map[string]string `protobuf:"bytes,7,rep,name=text_metrics,json=textMetrics,proto3" json:"text_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Sparkline []*Point `protobuf:"bytes,4,rep,name=sparkline,proto3" json:"sparkline,omitempty"` + Totals map[string]*MetricValues `protobuf:"bytes,5,rep,name=totals,proto3" json:"totals,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Fingerprint string `protobuf:"bytes,6,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + Metadata *GetSelectedQueryMetadataReply `protobuf:"bytes,8,opt,name=metadata,proto3" json:"metadata,omitempty"` } func (x *MetricsReply) Reset() { @@ -203,6 +204,13 @@ func (x *MetricsReply) GetFingerprint() string { return "" } +func (x *MetricsReply) GetMetadata() *GetSelectedQueryMetadataReply { + if x != nil { + return x.Metadata + } + return nil +} + // MetricValues is statistics of specific metric. type MetricValues struct { state protoimpl.MessageState @@ -1253,6 +1261,142 @@ func (x *ExplainFingerprintByQueryIDReply) GetPlaceholdersCount() uint32 { return 0 } +// GetSlecetedQueryMetadataReply consists selected query metadata to show in details for given query ID. 
+type GetSelectedQueryMetadataReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"` + Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"` + Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"` + ReplicationSet string `protobuf:"bytes,5,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"` + Cluster string `protobuf:"bytes,6,opt,name=cluster,proto3" json:"cluster,omitempty"` + ServiceType string `protobuf:"bytes,7,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"` + ServiceId string `protobuf:"bytes,8,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Environment string `protobuf:"bytes,9,opt,name=environment,proto3" json:"environment,omitempty"` + NodeId string `protobuf:"bytes,10,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeName string `protobuf:"bytes,11,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + NodeType string `protobuf:"bytes,12,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"` +} + +func (x *GetSelectedQueryMetadataReply) Reset() { + *x = GetSelectedQueryMetadataReply{} + if protoimpl.UnsafeEnabled { + mi := &file_qanpb_object_details_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSelectedQueryMetadataReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSelectedQueryMetadataReply) ProtoMessage() {} + +func (x *GetSelectedQueryMetadataReply) ProtoReflect() protoreflect.Message { + mi := &file_qanpb_object_details_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { 
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSelectedQueryMetadataReply.ProtoReflect.Descriptor instead. +func (*GetSelectedQueryMetadataReply) Descriptor() ([]byte, []int) { + return file_qanpb_object_details_proto_rawDescGZIP(), []int{18} +} + +func (x *GetSelectedQueryMetadataReply) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetDatabase() string { + if x != nil { + return x.Database + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetSchema() string { + if x != nil { + return x.Schema + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetReplicationSet() string { + if x != nil { + return x.ReplicationSet + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetServiceType() string { + if x != nil { + return x.ServiceType + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetEnvironment() string { + if x != nil { + return x.Environment + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + +func (x *GetSelectedQueryMetadataReply) GetNodeType() string { + if x != nil { + return x.NodeType + } + return "" +} + var File_qanpb_object_details_proto protoreflect.FileDescriptor var file_qanpb_object_details_proto_rawDesc = []byte{ @@ -1286,7 
+1430,7 @@ var file_qanpb_object_details_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x22, 0x9f, 0x04, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x22, 0xe7, 0x04, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x74, @@ -1306,127 +1450,41 @@ var file_qanpb_object_details_proto_rawDesc = []byte{ 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, - 0x74, 0x1a, 0x55, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x54, 0x65, 0x78, 0x74, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x54, 0x0a, 0x0b, 0x54, 0x6f, 0x74, 0x61, - 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb8, - 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x72, - 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, - 0x52, 0x03, 0x63, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x61, - 0x76, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x61, 0x76, 0x67, 0x12, 0x10, 0x0a, - 0x03, 0x70, 0x39, 0x39, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x70, 0x39, 0x39, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x06, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x22, 0xa3, 0x02, 0x0a, 0x13, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x46, 0x0a, 0x11, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x42, 0x0a, 0x0f, 0x70, 0x65, 0x72, - 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, - 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x6f, 0x12, 0x1b, 0x0a, - 0x09, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x42, 0x79, 0x12, 0x32, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, - 0x55, 0x0a, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x0e, 0x71, 0x75, 0x65, 0x72, 0x79, 
0x5f, 0x65, 0x78, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, - 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x22, 0xe5, 0x03, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x71, 0x61, 0x6e, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x65, 0x78, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3b, 0x0a, 0x0c, 0x65, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x6e, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x69, 0x73, 0x54, - 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x63, - 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, - 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x65, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x5f, 
0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, - 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xe0, - 0x01, 0x0a, 0x1a, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, - 0x11, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x66, 0x72, - 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x42, 0x0a, 0x0f, 
0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x42, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, - 0x62, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, - 0x79, 0x22, 0xbe, 0x01, 0x0a, 0x18, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, - 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x57, 0x0a, 0x0b, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x61, 0x6e, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x29, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 
0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x2c, 0x0a, - 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x22, 0x47, 0x0a, 0x0e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, - 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, - 0x6c, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x50, 0x6c, 0x61, 0x6e, 0x22, 0xec, 0x01, 0x0a, 0x10, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, - 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x70, 0x65, 0x72, + 0x74, 0x12, 0x46, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x55, 0x0a, 0x0c, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x61, 0x6e, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x3e, 0x0a, 0x10, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x54, 0x0a, 0x0b, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb8, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x72, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x63, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x63, 0x6e, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x12, + 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x6d, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, + 0x6d, 0x61, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x76, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, + 0x52, 0x03, 0x61, 0x76, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x39, 0x39, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x03, 0x70, 0x39, 0x39, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 
0x18, 0x08, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x74, 0x61, + 0x6c, 0x22, 0x1e, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xa3, 0x02, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, @@ -1435,111 +1493,227 @@ var file_qanpb_object_details_proto_rawDesc = []byte{ 0x74, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x54, 0x6f, 0x12, 0x32, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x69, 0x64, 0x22, 0x55, 0x0a, 0x0e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x43, 0x0a, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, - 0x61, 0x6d, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 
0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x22, - 0x48, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x5d, 0x0a, 0x22, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x69, 0x64, 0x12, 0x19, 0x0a, - 0x08, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x49, 0x64, 0x22, 0x82, 0x01, 0x0a, 0x20, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2f, 0x0a, - 0x13, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 
0x67, 0x65, 0x72, 0x70, - 0x72, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x65, 0x78, 0x70, 0x6c, - 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x2d, - 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x70, 0x6c, 0x61, 0x63, - 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x32, 0xc2, 0x07, - 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, - 0x71, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, - 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x71, 0x61, 0x6e, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, - 0x22, 0x20, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x12, 0x85, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, - 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x20, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, - 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 
0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x27, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, + 0x61, 0x72, 0x74, 0x54, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x42, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x12, 0x32, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x55, 0x0a, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x40, 0x0a, 0x0e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, + 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x22, 0xe5, + 0x03, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x65, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 
0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1a, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0d, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x12, 0x3b, 0x0a, 0x0c, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0b, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x73, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x2d, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x2f, 0x0a, 0x13, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x65, 0x78, + 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x16, + 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x11, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x42, 0x0a, + 0x0f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x6f, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, + 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x79, 0x12, 0x19, + 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x67, 0x72, 
0x6f, 0x75, 0x70, 0x42, 0x79, 0x22, 0xbe, 0x01, 0x0a, 0x18, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x57, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x29, 0x0a, 0x0f, 0x4c, 0x69, + 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x2c, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, + 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x69, 0x64, 0x22, 0x47, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x61, 
0x6e, 0x69, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x22, 0xec, 0x01, 0x0a, + 0x10, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x46, 0x0a, 0x11, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x42, 0x0a, 0x0f, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, + 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x6f, 0x12, 0x32, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x22, 0x55, 0x0a, 0x0e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x43, 0x0a, + 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, + 
0x65, 0x74, 0x61, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, + 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x66, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x48, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x22, 0x5d, 0x0a, 0x22, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, + 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x79, 0x49, 0x64, + 0x22, 0x82, 0x01, 0x0a, 0x20, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, + 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2f, 0x0a, 0x13, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 
0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, + 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x11, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x8c, 0x03, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, + 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x54, 0x79, 0x70, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, + 0x20, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, + 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x32, 0xc2, 0x07, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x71, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x2b, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, + 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x85, 0x01, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x20, 0x2e, + 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x45, 
0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x30, 0x2f, + 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x2f, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x12, 0x87, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x27, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x2a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, - 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x12, 0x79, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x1d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x71, 0x61, 0x6e, 
0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, - 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x3a, 0x01, 0x2a, 0x22, 0x22, 0x2f, 0x76, 0x30, - 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, - 0x79, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, - 0x1d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, - 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x2d, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x27, 0x3a, 0x01, 0x2a, 0x22, 0x22, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, 0x65, - 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x78, 0x0a, 0x0b, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x71, 0x61, 0x6e, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x69, - 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, - 0x2a, 0x22, 0x21, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, - 0x69, 0x73, 0x74, 0x73, 0x12, 0xbb, 0x01, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 
0x6e, - 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x49, 0x44, 0x12, 0x2f, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, - 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, - 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x3a, 0x01, 0x2a, 0x22, - 0x31, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, - 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x49, 0x44, 0x42, 0x9f, 0x01, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, - 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x12, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, - 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x71, 0x61, 0x6e, 0x70, 0x62, 0x3b, 0x71, - 0x61, 0x6e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x51, 0x58, 0x58, 0xaa, - 0x02, 0x0b, 0x51, 0x61, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x0b, - 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x17, 0x51, 0x61, - 0x6e, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 
0xea, 0x02, 0x0c, 0x51, 0x61, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x69, 0x6c, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x2a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x30, 0x2f, + 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x2f, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x79, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x1d, 0x2e, 0x71, 0x61, + 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x71, 0x61, 0x6e, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, + 0x61, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x3a, + 0x01, 0x2a, 0x22, 0x22, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x79, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x1d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x3a, 0x01, 0x2a, 0x22, 0x22, 0x2f, + 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x47, 0x65, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 
0x61, + 0x6d, 0x12, 0x78, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, + 0x12, 0x1f, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2c, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x22, 0x21, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, + 0x6e, 0x2f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0xbb, 0x01, 0x0a, 0x1b, + 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x12, 0x2f, 0x2e, 0x71, 0x61, + 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, + 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x71, + 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, 0x79, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x3c, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x36, 0x3a, 0x01, 0x2a, 0x22, 0x31, 0x2f, 0x76, 0x30, 0x2f, 0x71, 0x61, 0x6e, 0x2f, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x45, 0x78, + 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x42, 0x79, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x44, 0x42, 0x9f, 0x01, 0x0a, 0x0f, 0x63, 0x6f, + 0x6d, 0x2e, 0x71, 0x61, 0x6e, 
0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x12, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x71, 0x61, 0x6e, 0x70, 0x62, 0x3b, 0x71, 0x61, 0x6e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0xa2, 0x02, 0x03, 0x51, 0x58, 0x58, 0xaa, 0x02, 0x0b, 0x51, 0x61, 0x6e, 0x2e, 0x56, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x0b, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0xe2, 0x02, 0x17, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0c, 0x51, + 0x61, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1555,7 +1729,7 @@ func file_qanpb_object_details_proto_rawDescGZIP() []byte { } var ( - file_qanpb_object_details_proto_msgTypes = make([]protoimpl.MessageInfo, 22) + file_qanpb_object_details_proto_msgTypes = make([]protoimpl.MessageInfo, 23) file_qanpb_object_details_proto_goTypes = []interface{}{ (*MetricsRequest)(nil), // 0: qan.v1beta1.MetricsRequest (*MetricsReply)(nil), // 1: qan.v1beta1.MetricsReply @@ -1575,62 +1749,64 @@ var ( (*QueryExistsRequest)(nil), // 15: qan.v1beta1.QueryExistsRequest (*ExplainFingerprintByQueryIDRequest)(nil), // 16: qan.v1beta1.ExplainFingerprintByQueryIDRequest (*ExplainFingerprintByQueryIDReply)(nil), // 17: qan.v1beta1.ExplainFingerprintByQueryIDReply - nil, // 18: qan.v1beta1.MetricsReply.MetricsEntry - nil, // 19: qan.v1beta1.MetricsReply.TextMetricsEntry - nil, // 20: qan.v1beta1.MetricsReply.TotalsEntry - nil, // 21: qan.v1beta1.ObjectDetailsLabelsReply.LabelsEntry - (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp - (*MapFieldEntry)(nil), // 23: 
qan.v1beta1.MapFieldEntry - (*Point)(nil), // 24: qan.v1beta1.Point - (ExampleFormat)(0), // 25: qan.v1beta1.ExampleFormat - (ExampleType)(0), // 26: qan.v1beta1.ExampleType - (*wrapperspb.BoolValue)(nil), // 27: google.protobuf.BoolValue + (*GetSelectedQueryMetadataReply)(nil), // 18: qan.v1beta1.GetSelectedQueryMetadataReply + nil, // 19: qan.v1beta1.MetricsReply.MetricsEntry + nil, // 20: qan.v1beta1.MetricsReply.TextMetricsEntry + nil, // 21: qan.v1beta1.MetricsReply.TotalsEntry + nil, // 22: qan.v1beta1.ObjectDetailsLabelsReply.LabelsEntry + (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp + (*MapFieldEntry)(nil), // 24: qan.v1beta1.MapFieldEntry + (*Point)(nil), // 25: qan.v1beta1.Point + (ExampleFormat)(0), // 26: qan.v1beta1.ExampleFormat + (ExampleType)(0), // 27: qan.v1beta1.ExampleType + (*wrapperspb.BoolValue)(nil), // 28: google.protobuf.BoolValue } ) var file_qanpb_object_details_proto_depIdxs = []int32{ - 22, // 0: qan.v1beta1.MetricsRequest.period_start_from:type_name -> google.protobuf.Timestamp - 22, // 1: qan.v1beta1.MetricsRequest.period_start_to:type_name -> google.protobuf.Timestamp - 23, // 2: qan.v1beta1.MetricsRequest.labels:type_name -> qan.v1beta1.MapFieldEntry - 18, // 3: qan.v1beta1.MetricsReply.metrics:type_name -> qan.v1beta1.MetricsReply.MetricsEntry - 19, // 4: qan.v1beta1.MetricsReply.text_metrics:type_name -> qan.v1beta1.MetricsReply.TextMetricsEntry - 24, // 5: qan.v1beta1.MetricsReply.sparkline:type_name -> qan.v1beta1.Point - 20, // 6: qan.v1beta1.MetricsReply.totals:type_name -> qan.v1beta1.MetricsReply.TotalsEntry - 22, // 7: qan.v1beta1.QueryExampleRequest.period_start_from:type_name -> google.protobuf.Timestamp - 22, // 8: qan.v1beta1.QueryExampleRequest.period_start_to:type_name -> google.protobuf.Timestamp - 23, // 9: qan.v1beta1.QueryExampleRequest.labels:type_name -> qan.v1beta1.MapFieldEntry - 6, // 10: qan.v1beta1.QueryExampleReply.query_examples:type_name -> qan.v1beta1.QueryExample - 25, // 11: 
qan.v1beta1.QueryExample.example_format:type_name -> qan.v1beta1.ExampleFormat - 26, // 12: qan.v1beta1.QueryExample.example_type:type_name -> qan.v1beta1.ExampleType - 22, // 13: qan.v1beta1.ObjectDetailsLabelsRequest.period_start_from:type_name -> google.protobuf.Timestamp - 22, // 14: qan.v1beta1.ObjectDetailsLabelsRequest.period_start_to:type_name -> google.protobuf.Timestamp - 21, // 15: qan.v1beta1.ObjectDetailsLabelsReply.labels:type_name -> qan.v1beta1.ObjectDetailsLabelsReply.LabelsEntry - 22, // 16: qan.v1beta1.HistogramRequest.period_start_from:type_name -> google.protobuf.Timestamp - 22, // 17: qan.v1beta1.HistogramRequest.period_start_to:type_name -> google.protobuf.Timestamp - 23, // 18: qan.v1beta1.HistogramRequest.labels:type_name -> qan.v1beta1.MapFieldEntry - 14, // 19: qan.v1beta1.HistogramReply.histogram_items:type_name -> qan.v1beta1.HistogramItem - 2, // 20: qan.v1beta1.MetricsReply.MetricsEntry.value:type_name -> qan.v1beta1.MetricValues - 2, // 21: qan.v1beta1.MetricsReply.TotalsEntry.value:type_name -> qan.v1beta1.MetricValues - 9, // 22: qan.v1beta1.ObjectDetailsLabelsReply.LabelsEntry.value:type_name -> qan.v1beta1.ListLabelValues - 0, // 23: qan.v1beta1.ObjectDetails.GetMetrics:input_type -> qan.v1beta1.MetricsRequest - 4, // 24: qan.v1beta1.ObjectDetails.GetQueryExample:input_type -> qan.v1beta1.QueryExampleRequest - 7, // 25: qan.v1beta1.ObjectDetails.GetLabels:input_type -> qan.v1beta1.ObjectDetailsLabelsRequest - 10, // 26: qan.v1beta1.ObjectDetails.GetQueryPlan:input_type -> qan.v1beta1.QueryPlanRequest - 12, // 27: qan.v1beta1.ObjectDetails.GetHistogram:input_type -> qan.v1beta1.HistogramRequest - 15, // 28: qan.v1beta1.ObjectDetails.QueryExists:input_type -> qan.v1beta1.QueryExistsRequest - 16, // 29: qan.v1beta1.ObjectDetails.ExplainFingerprintByQueryID:input_type -> qan.v1beta1.ExplainFingerprintByQueryIDRequest - 1, // 30: qan.v1beta1.ObjectDetails.GetMetrics:output_type -> qan.v1beta1.MetricsReply - 5, // 31: 
qan.v1beta1.ObjectDetails.GetQueryExample:output_type -> qan.v1beta1.QueryExampleReply - 8, // 32: qan.v1beta1.ObjectDetails.GetLabels:output_type -> qan.v1beta1.ObjectDetailsLabelsReply - 11, // 33: qan.v1beta1.ObjectDetails.GetQueryPlan:output_type -> qan.v1beta1.QueryPlanReply - 13, // 34: qan.v1beta1.ObjectDetails.GetHistogram:output_type -> qan.v1beta1.HistogramReply - 27, // 35: qan.v1beta1.ObjectDetails.QueryExists:output_type -> google.protobuf.BoolValue - 17, // 36: qan.v1beta1.ObjectDetails.ExplainFingerprintByQueryID:output_type -> qan.v1beta1.ExplainFingerprintByQueryIDReply - 30, // [30:37] is the sub-list for method output_type - 23, // [23:30] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // [0:23] is the sub-list for field type_name + 23, // 0: qan.v1beta1.MetricsRequest.period_start_from:type_name -> google.protobuf.Timestamp + 23, // 1: qan.v1beta1.MetricsRequest.period_start_to:type_name -> google.protobuf.Timestamp + 24, // 2: qan.v1beta1.MetricsRequest.labels:type_name -> qan.v1beta1.MapFieldEntry + 19, // 3: qan.v1beta1.MetricsReply.metrics:type_name -> qan.v1beta1.MetricsReply.MetricsEntry + 20, // 4: qan.v1beta1.MetricsReply.text_metrics:type_name -> qan.v1beta1.MetricsReply.TextMetricsEntry + 25, // 5: qan.v1beta1.MetricsReply.sparkline:type_name -> qan.v1beta1.Point + 21, // 6: qan.v1beta1.MetricsReply.totals:type_name -> qan.v1beta1.MetricsReply.TotalsEntry + 18, // 7: qan.v1beta1.MetricsReply.metadata:type_name -> qan.v1beta1.GetSelectedQueryMetadataReply + 23, // 8: qan.v1beta1.QueryExampleRequest.period_start_from:type_name -> google.protobuf.Timestamp + 23, // 9: qan.v1beta1.QueryExampleRequest.period_start_to:type_name -> google.protobuf.Timestamp + 24, // 10: qan.v1beta1.QueryExampleRequest.labels:type_name -> qan.v1beta1.MapFieldEntry + 6, // 11: qan.v1beta1.QueryExampleReply.query_examples:type_name -> 
qan.v1beta1.QueryExample + 26, // 12: qan.v1beta1.QueryExample.example_format:type_name -> qan.v1beta1.ExampleFormat + 27, // 13: qan.v1beta1.QueryExample.example_type:type_name -> qan.v1beta1.ExampleType + 23, // 14: qan.v1beta1.ObjectDetailsLabelsRequest.period_start_from:type_name -> google.protobuf.Timestamp + 23, // 15: qan.v1beta1.ObjectDetailsLabelsRequest.period_start_to:type_name -> google.protobuf.Timestamp + 22, // 16: qan.v1beta1.ObjectDetailsLabelsReply.labels:type_name -> qan.v1beta1.ObjectDetailsLabelsReply.LabelsEntry + 23, // 17: qan.v1beta1.HistogramRequest.period_start_from:type_name -> google.protobuf.Timestamp + 23, // 18: qan.v1beta1.HistogramRequest.period_start_to:type_name -> google.protobuf.Timestamp + 24, // 19: qan.v1beta1.HistogramRequest.labels:type_name -> qan.v1beta1.MapFieldEntry + 14, // 20: qan.v1beta1.HistogramReply.histogram_items:type_name -> qan.v1beta1.HistogramItem + 2, // 21: qan.v1beta1.MetricsReply.MetricsEntry.value:type_name -> qan.v1beta1.MetricValues + 2, // 22: qan.v1beta1.MetricsReply.TotalsEntry.value:type_name -> qan.v1beta1.MetricValues + 9, // 23: qan.v1beta1.ObjectDetailsLabelsReply.LabelsEntry.value:type_name -> qan.v1beta1.ListLabelValues + 0, // 24: qan.v1beta1.ObjectDetails.GetMetrics:input_type -> qan.v1beta1.MetricsRequest + 4, // 25: qan.v1beta1.ObjectDetails.GetQueryExample:input_type -> qan.v1beta1.QueryExampleRequest + 7, // 26: qan.v1beta1.ObjectDetails.GetLabels:input_type -> qan.v1beta1.ObjectDetailsLabelsRequest + 10, // 27: qan.v1beta1.ObjectDetails.GetQueryPlan:input_type -> qan.v1beta1.QueryPlanRequest + 12, // 28: qan.v1beta1.ObjectDetails.GetHistogram:input_type -> qan.v1beta1.HistogramRequest + 15, // 29: qan.v1beta1.ObjectDetails.QueryExists:input_type -> qan.v1beta1.QueryExistsRequest + 16, // 30: qan.v1beta1.ObjectDetails.ExplainFingerprintByQueryID:input_type -> qan.v1beta1.ExplainFingerprintByQueryIDRequest + 1, // 31: qan.v1beta1.ObjectDetails.GetMetrics:output_type -> 
qan.v1beta1.MetricsReply + 5, // 32: qan.v1beta1.ObjectDetails.GetQueryExample:output_type -> qan.v1beta1.QueryExampleReply + 8, // 33: qan.v1beta1.ObjectDetails.GetLabels:output_type -> qan.v1beta1.ObjectDetailsLabelsReply + 11, // 34: qan.v1beta1.ObjectDetails.GetQueryPlan:output_type -> qan.v1beta1.QueryPlanReply + 13, // 35: qan.v1beta1.ObjectDetails.GetHistogram:output_type -> qan.v1beta1.HistogramReply + 28, // 36: qan.v1beta1.ObjectDetails.QueryExists:output_type -> google.protobuf.BoolValue + 17, // 37: qan.v1beta1.ObjectDetails.ExplainFingerprintByQueryID:output_type -> qan.v1beta1.ExplainFingerprintByQueryIDReply + 31, // [31:38] is the sub-list for method output_type + 24, // [24:31] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name } func init() { file_qanpb_object_details_proto_init() } @@ -1856,6 +2032,18 @@ func file_qanpb_object_details_proto_init() { return nil } } + file_qanpb_object_details_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSelectedQueryMetadataReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1863,7 +2051,7 @@ func file_qanpb_object_details_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_qanpb_object_details_proto_rawDesc, NumEnums: 0, - NumMessages: 22, + NumMessages: 23, NumExtensions: 0, NumServices: 1, }, diff --git a/api/qanpb/object_details.pb.validate.go b/api/qanpb/object_details.pb.validate.go index d665b46a30..b59b656fb9 100644 --- a/api/qanpb/object_details.pb.validate.go +++ b/api/qanpb/object_details.pb.validate.go @@ -385,6 +385,35 @@ func (m *MetricsReply) validate(all bool) error { // no validation rules for Fingerprint + if all { + switch v := 
interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetricsReplyValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetricsReplyValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetricsReplyValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return MetricsReplyMultiError(errors) } @@ -2505,3 +2534,130 @@ var _ interface { Cause() error ErrorName() string } = ExplainFingerprintByQueryIDReplyValidationError{} + +// Validate checks the field values on GetSelectedQueryMetadataReply with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetSelectedQueryMetadataReply) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetSelectedQueryMetadataReply with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetSelectedQueryMetadataReplyMultiError, or nil if none found. 
+func (m *GetSelectedQueryMetadataReply) ValidateAll() error { + return m.validate(true) +} + +func (m *GetSelectedQueryMetadataReply) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ServiceName + + // no validation rules for Database + + // no validation rules for Schema + + // no validation rules for Username + + // no validation rules for ReplicationSet + + // no validation rules for Cluster + + // no validation rules for ServiceType + + // no validation rules for ServiceId + + // no validation rules for Environment + + // no validation rules for NodeId + + // no validation rules for NodeName + + // no validation rules for NodeType + + if len(errors) > 0 { + return GetSelectedQueryMetadataReplyMultiError(errors) + } + + return nil +} + +// GetSelectedQueryMetadataReplyMultiError is an error wrapping multiple +// validation errors returned by GetSelectedQueryMetadataReply.ValidateAll() +// if the designated constraints aren't met. +type GetSelectedQueryMetadataReplyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetSelectedQueryMetadataReplyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetSelectedQueryMetadataReplyMultiError) AllErrors() []error { return m } + +// GetSelectedQueryMetadataReplyValidationError is the validation error +// returned by GetSelectedQueryMetadataReply.Validate if the designated +// constraints aren't met. +type GetSelectedQueryMetadataReplyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetSelectedQueryMetadataReplyValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e GetSelectedQueryMetadataReplyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetSelectedQueryMetadataReplyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetSelectedQueryMetadataReplyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetSelectedQueryMetadataReplyValidationError) ErrorName() string { + return "GetSelectedQueryMetadataReplyValidationError" +} + +// Error satisfies the builtin error interface +func (e GetSelectedQueryMetadataReplyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetSelectedQueryMetadataReply.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetSelectedQueryMetadataReplyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetSelectedQueryMetadataReplyValidationError{} diff --git a/api/qanpb/object_details.proto b/api/qanpb/object_details.proto index d8a1e7303e..b96472e76b 100644 --- a/api/qanpb/object_details.proto +++ b/api/qanpb/object_details.proto @@ -83,6 +83,7 @@ message MetricsReply { repeated Point sparkline = 4; map totals = 5; string fingerprint = 6; + GetSelectedQueryMetadataReply metadata = 8; } // MetricValues is statistics of specific metric. @@ -205,3 +206,19 @@ message ExplainFingerprintByQueryIDReply { string explain_fingerprint = 1; uint32 placeholders_count = 2; } + +// GetSlecetedQueryMetadataReply consists selected query metadata to show in details for given query ID. 
+message GetSelectedQueryMetadataReply { + string service_name = 1; + string database = 2; + string schema = 3; + string username = 4; + string replication_set = 5; + string cluster = 6; + string service_type = 7; + string service_id = 8; + string environment = 9; + string node_id = 10; + string node_name = 11; + string node_type = 12; +} diff --git a/api/swagger/swagger-dev.json b/api/swagger/swagger-dev.json index ca384191fa..4a4147809a 100644 --- a/api/swagger/swagger-dev.json +++ b/api/swagger/swagger-dev.json @@ -1769,6 +1769,61 @@ "fingerprint": { "type": "string", "x-order": 4 + }, + "metadata": { + "description": "GetSlecetedQueryMetadataReply consists selected query metadata to show in details for given query ID.", + "type": "object", + "properties": { + "service_name": { + "type": "string", + "x-order": 0 + }, + "database": { + "type": "string", + "x-order": 1 + }, + "schema": { + "type": "string", + "x-order": 2 + }, + "username": { + "type": "string", + "x-order": 3 + }, + "replication_set": { + "type": "string", + "x-order": 4 + }, + "cluster": { + "type": "string", + "x-order": 5 + }, + "service_type": { + "type": "string", + "x-order": 6 + }, + "service_id": { + "type": "string", + "x-order": 7 + }, + "environment": { + "type": "string", + "x-order": 8 + }, + "node_id": { + "type": "string", + "x-order": 9 + }, + "node_name": { + "type": "string", + "x-order": 10 + }, + "node_type": { + "type": "string", + "x-order": 11 + } + }, + "x-order": 5 } } } @@ -18226,16 +18281,23 @@ "operationId": "ListAgentsMixin10", "parameters": [ { + "description": "Only one of the parameters below must be set.", "name": "body", "in": "body", "required": true, "schema": { + "description": "Only one of the parameters below must be set.", "type": "object", "properties": { "service_id": { "description": "Return only Agents that relate to a specific ServiceID.", "type": "string", "x-order": 0 + }, + "node_id": { + "description": "Return only Agents that relate to a specific 
NodeID.", + "type": "string", + "x-order": 1 } } } @@ -18254,7 +18316,7 @@ "type": "object", "properties": { "agent_id": { - "description": "Unique randomly generated instance identifier.", + "description": "Unique agent identifier.", "type": "string", "x-order": 0 }, @@ -28043,6 +28105,452 @@ } } }, + "/v1/management/Node/Get": { + "post": { + "description": "Returns a single Node by ID.", + "tags": [ + "MgmtNode" + ], + "summary": "Get Node", + "operationId": "GetNodeMixin11", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "node_id": { + "description": "Unique Node identifier.", + "type": "string", + "x-order": 0 + } + } + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "node": { + "type": "object", + "properties": { + "node_id": { + "description": "Unique Node identifier.", + "type": "string", + "x-order": 0 + }, + "node_type": { + "description": "Node type.", + "type": "string", + "x-order": 1 + }, + "node_name": { + "description": "User-defined node name.", + "type": "string", + "x-order": 2 + }, + "machine_id": { + "description": "Linux machine-id.", + "type": "string", + "x-order": 3 + }, + "distro": { + "description": "Linux distribution name and version.", + "type": "string", + "x-order": 4 + }, + "node_model": { + "description": "Node model.", + "type": "string", + "x-order": 5 + }, + "container_id": { + "description": "A node's unique docker container identifier.", + "type": "string", + "x-order": 6 + }, + "container_name": { + "description": "Container name.", + "type": "string", + "x-order": 7 + }, + "address": { + "description": "Node address (DNS name or IP).", + "type": "string", + "x-order": 8 + }, + "region": { + "description": "Node region.", + "type": "string", + "x-order": 9 + }, + "az": { + "description": "Node availability zone.", + "type": "string", + "x-order": 10 + }, + 
"custom_labels": { + "description": "Custom user-assigned labels for Node.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-order": 11 + }, + "created_at": { + "description": "Creation timestamp.", + "type": "string", + "format": "date-time", + "x-order": 12 + }, + "updated_at": { + "description": "Last update timestamp.", + "type": "string", + "format": "date-time", + "x-order": 13 + }, + "status": { + "description": "Node status.\n\n - STATUS_INVALID: Invalid status.\n - UP: The node is up.\n - DOWN: The node is down.\n - UNKNOWN: The node's status cannot be known (e.g. there are no metrics yet).", + "type": "string", + "default": "STATUS_INVALID", + "enum": [ + "STATUS_INVALID", + "UP", + "DOWN", + "UNKNOWN" + ], + "x-order": 14 + }, + "agents": { + "description": "List of agents related to this node.", + "type": "array", + "items": { + "type": "object", + "properties": { + "agent_id": { + "description": "Unique Agent identifier.", + "type": "string", + "x-order": 0 + }, + "agent_type": { + "description": "Agent type.", + "type": "string", + "x-order": 1 + }, + "status": { + "description": "Actual Agent status.", + "type": "string", + "x-order": 2 + }, + "is_connected": { + "description": "True if Agent is running and connected to pmm-managed.", + "type": "boolean", + "x-order": 3 + } + } + }, + "x-order": 15 + }, + "services": { + "description": "List of services running on this node.", + "type": "array", + "items": { + "description": "Service represents a service running on a node.", + "type": "object", + "properties": { + "service_id": { + "description": "Unique Service identifier.", + "type": "string", + "x-order": 0 + }, + "service_type": { + "description": "Service type.", + "type": "string", + "x-order": 1 + }, + "service_name": { + "description": "Service name.", + "type": "string", + "x-order": 2 + } + } + }, + "x-order": 16 + } + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error 
response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "message": { + "type": "string", + "x-order": 1 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "x-order": 0 + } + }, + "additionalProperties": false + }, + "x-order": 2 + } + } + } + } + } + } + }, + "/v1/management/Node/List": { + "post": { + "description": "Returns a filtered list of Nodes.", + "tags": [ + "MgmtNode" + ], + "summary": "List Nodes", + "operationId": "ListNodesMixin11", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "object", + "properties": { + "node_type": { + "description": "NodeType describes supported Node types.", + "type": "string", + "default": "NODE_TYPE_INVALID", + "enum": [ + "NODE_TYPE_INVALID", + "GENERIC_NODE", + "CONTAINER_NODE", + "REMOTE_NODE", + "REMOTE_RDS_NODE", + "REMOTE_AZURE_DATABASE_NODE" + ], + "x-order": 0 + } + } + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "nodes": { + "type": "array", + "items": { + "type": "object", + "properties": { + "node_id": { + "description": "Unique Node identifier.", + "type": "string", + "x-order": 0 + }, + "node_type": { + "description": "Node type.", + "type": "string", + "x-order": 1 + }, + "node_name": { + "description": "User-defined node name.", + "type": "string", + "x-order": 2 + }, + "machine_id": { + "description": "Linux machine-id.", + "type": "string", + "x-order": 3 + }, + "distro": { + "description": "Linux distribution name and version.", + "type": "string", + "x-order": 4 + }, + "node_model": { + "description": "Node model.", + "type": "string", + "x-order": 5 + }, + "container_id": { + "description": "A node's unique docker container identifier.", + "type": "string", + "x-order": 6 + }, + "container_name": { + 
"description": "Container name.", + "type": "string", + "x-order": 7 + }, + "address": { + "description": "Node address (DNS name or IP).", + "type": "string", + "x-order": 8 + }, + "region": { + "description": "Node region.", + "type": "string", + "x-order": 9 + }, + "az": { + "description": "Node availability zone.", + "type": "string", + "x-order": 10 + }, + "custom_labels": { + "description": "Custom user-assigned labels for Node.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-order": 11 + }, + "created_at": { + "description": "Creation timestamp.", + "type": "string", + "format": "date-time", + "x-order": 12 + }, + "updated_at": { + "description": "Last update timestamp.", + "type": "string", + "format": "date-time", + "x-order": 13 + }, + "status": { + "description": "Node status.\n\n - STATUS_INVALID: Invalid status.\n - UP: The node is up.\n - DOWN: The node is down.\n - UNKNOWN: The node's status cannot be known (e.g. there are no metrics yet).", + "type": "string", + "default": "STATUS_INVALID", + "enum": [ + "STATUS_INVALID", + "UP", + "DOWN", + "UNKNOWN" + ], + "x-order": 14 + }, + "agents": { + "description": "List of agents related to this node.", + "type": "array", + "items": { + "type": "object", + "properties": { + "agent_id": { + "description": "Unique Agent identifier.", + "type": "string", + "x-order": 0 + }, + "agent_type": { + "description": "Agent type.", + "type": "string", + "x-order": 1 + }, + "status": { + "description": "Actual Agent status.", + "type": "string", + "x-order": 2 + }, + "is_connected": { + "description": "True if Agent is running and connected to pmm-managed.", + "type": "boolean", + "x-order": 3 + } + } + }, + "x-order": 15 + }, + "services": { + "description": "List of services running on this node.", + "type": "array", + "items": { + "description": "Service represents a service running on a node.", + "type": "object", + "properties": { + "service_id": { + "description": "Unique Service 
identifier.", + "type": "string", + "x-order": 0 + }, + "service_type": { + "description": "Service type.", + "type": "string", + "x-order": 1 + }, + "service_name": { + "description": "Service name.", + "type": "string", + "x-order": 2 + } + } + }, + "x-order": 16 + } + } + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "message": { + "type": "string", + "x-order": 1 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "x-order": 0 + } + }, + "additionalProperties": false + }, + "x-order": 2 + } + } + } + } + } + } + }, "/v1/management/Node/Register": { "post": { "description": "Registers a new Node and pmm-agent.", @@ -31789,7 +32297,7 @@ "MgmtService" ], "summary": "List Services", - "operationId": "ListServicesMixin11", + "operationId": "ListServicesMixin12", "parameters": [ { "name": "body", @@ -31932,7 +32440,7 @@ "type": "object", "properties": { "agent_id": { - "description": "Unique randomly generated instance identifier.", + "description": "Unique agent identifier.", "type": "string", "x-order": 0 }, @@ -32242,7 +32750,7 @@ "x-order": 16 }, "status": { - "description": "Service status.\n\n - STATUS_INVALID: In case we don't support the db vendor yet.\n - UP: The service is up.\n - DOWN: The service is down.\n - UNKNOWN: The service's status cannot be known (p.e. there are no metrics yet).", + "description": "Service status.\n\n - STATUS_INVALID: In case we don't support the db vendor yet.\n - UP: The service is up.\n - DOWN: The service is down.\n - UNKNOWN: The service's status cannot be known (e.g. 
there are no metrics yet).", "type": "string", "default": "STATUS_INVALID", "enum": [ @@ -33593,7 +34101,8 @@ "BACKUP_STATUS_SUCCESS", "BACKUP_STATUS_ERROR", "BACKUP_STATUS_DELETING", - "BACKUP_STATUS_FAILED_TO_DELETE" + "BACKUP_STATUS_FAILED_TO_DELETE", + "BACKUP_STATUS_CLEANUP_IN_PROGRESS" ], "x-order": 8 }, @@ -33614,6 +34123,64 @@ "PITR" ], "x-order": 10 + }, + "is_sharded_cluster": { + "description": "Source database setup type.", + "type": "boolean", + "x-order": 11 + }, + "folder": { + "description": "Folder to store artifact on a storage.", + "type": "string", + "x-order": 12 + }, + "metadata_list": { + "description": "List of artifact metadata.", + "type": "array", + "items": { + "description": "Metadata contains extra artifact data like files it consists of, tool specific data, etc.", + "type": "object", + "properties": { + "file_list": { + "description": "List of files backup consists of.", + "type": "array", + "items": { + "description": "File represents file or folder on a storage.", + "type": "object", + "properties": { + "name": { + "type": "string", + "x-order": 0 + }, + "is_directory": { + "type": "boolean", + "x-order": 1 + } + } + }, + "x-order": 0 + }, + "restore_to": { + "description": "Exact time DB can be restored to.", + "type": "string", + "format": "date-time", + "x-order": 1 + }, + "pbm_metadata": { + "description": "PbmMetadata contains additional data for pbm cli tools.", + "type": "object", + "properties": { + "name": { + "description": "Name of backup in backup tool representation.", + "type": "string", + "x-order": 0 + } + }, + "x-order": 2 + } + } + }, + "x-order": 13 } } }, @@ -34284,6 +34851,11 @@ "PITR" ], "x-order": 17 + }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 18 } } }, @@ -34564,6 +35136,11 @@ "LOGICAL" ], "x-order": 11 + }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 12 } } } @@ -34673,6 +35250,11 @@ "LOGICAL" 
], "x-order": 6 + }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 7 } } } @@ -38762,6 +39344,9 @@ { "name": "Agent" }, + { + "name": "MgmtNode" + }, { "name": "MgmtService" }, diff --git a/api/swagger/swagger.json b/api/swagger/swagger.json index 5a0c19bcc1..1dc78e76c5 100644 --- a/api/swagger/swagger.json +++ b/api/swagger/swagger.json @@ -21704,7 +21704,8 @@ "BACKUP_STATUS_SUCCESS", "BACKUP_STATUS_ERROR", "BACKUP_STATUS_DELETING", - "BACKUP_STATUS_FAILED_TO_DELETE" + "BACKUP_STATUS_FAILED_TO_DELETE", + "BACKUP_STATUS_CLEANUP_IN_PROGRESS" ], "x-order": 8 }, @@ -21725,6 +21726,64 @@ "PITR" ], "x-order": 10 + }, + "is_sharded_cluster": { + "description": "Source database setup type.", + "type": "boolean", + "x-order": 11 + }, + "folder": { + "description": "Folder to store artifact on a storage.", + "type": "string", + "x-order": 12 + }, + "metadata_list": { + "description": "List of artifact metadata.", + "type": "array", + "items": { + "description": "Metadata contains extra artifact data like files it consists of, tool specific data, etc.", + "type": "object", + "properties": { + "file_list": { + "description": "List of files backup consists of.", + "type": "array", + "items": { + "description": "File represents file or folder on a storage.", + "type": "object", + "properties": { + "name": { + "type": "string", + "x-order": 0 + }, + "is_directory": { + "type": "boolean", + "x-order": 1 + } + } + }, + "x-order": 0 + }, + "restore_to": { + "description": "Exact time DB can be restored to.", + "type": "string", + "format": "date-time", + "x-order": 1 + }, + "pbm_metadata": { + "description": "PbmMetadata contains additional data for pbm cli tools.", + "type": "object", + "properties": { + "name": { + "description": "Name of backup in backup tool representation.", + "type": "string", + "x-order": 0 + } + }, + "x-order": 2 + } + } + }, + "x-order": 13 } } }, @@ -22395,6 +22454,11 @@ "PITR" ], "x-order": 17 + }, + 
"folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 18 } } }, @@ -22675,6 +22739,11 @@ "LOGICAL" ], "x-order": 11 + }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 12 } } } @@ -22784,6 +22853,11 @@ "LOGICAL" ], "x-order": 6 + }, + "folder": { + "description": "Folder on storage for artifact.", + "type": "string", + "x-order": 7 } } } diff --git a/api/uieventspb/server.pb.validate.go b/api/uieventspb/server.pb.validate.go new file mode 100644 index 0000000000..3bfe724672 --- /dev/null +++ b/api/uieventspb/server.pb.validate.go @@ -0,0 +1,809 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: uieventspb/server.proto + +package uieventspb + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on NotificationEvent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *NotificationEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NotificationEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NotificationEventMultiError, or nil if none found. 
+func (m *NotificationEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *NotificationEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Title + + // no validation rules for Text + + // no validation rules for Location + + // no validation rules for LocationParams + + if len(errors) > 0 { + return NotificationEventMultiError(errors) + } + + return nil +} + +// NotificationEventMultiError is an error wrapping multiple validation errors +// returned by NotificationEvent.ValidateAll() if the designated constraints +// aren't met. +type NotificationEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NotificationEventMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NotificationEventMultiError) AllErrors() []error { return m } + +// NotificationEventValidationError is the validation error returned by +// NotificationEvent.Validate if the designated constraints aren't met. +type NotificationEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NotificationEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NotificationEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NotificationEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NotificationEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e NotificationEventValidationError) ErrorName() string { + return "NotificationEventValidationError" +} + +// Error satisfies the builtin error interface +func (e NotificationEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNotificationEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NotificationEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NotificationEventValidationError{} + +// Validate checks the field values on FetchingEvent with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FetchingEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FetchingEvent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FetchingEventMultiError, or +// nil if none found. +func (m *FetchingEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *FetchingEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Component + + // no validation rules for LoadTime + + // no validation rules for Location + + // no validation rules for LocationParams + + if len(errors) > 0 { + return FetchingEventMultiError(errors) + } + + return nil +} + +// FetchingEventMultiError is an error wrapping multiple validation errors +// returned by FetchingEvent.ValidateAll() if the designated constraints +// aren't met. +type FetchingEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FetchingEventMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FetchingEventMultiError) AllErrors() []error { return m } + +// FetchingEventValidationError is the validation error returned by +// FetchingEvent.Validate if the designated constraints aren't met. +type FetchingEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FetchingEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FetchingEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FetchingEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FetchingEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FetchingEventValidationError) ErrorName() string { return "FetchingEventValidationError" } + +// Error satisfies the builtin error interface +func (e FetchingEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFetchingEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FetchingEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FetchingEventValidationError{} + +// Validate checks the field values on DashboardUsageEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *DashboardUsageEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DashboardUsageEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DashboardUsageEventMultiError, or nil if none found. +func (m *DashboardUsageEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *DashboardUsageEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Uid + + // no validation rules for Title + + // no validation rules for LoadTime + + // no validation rules for Location + + // no validation rules for LocationParams + + if len(errors) > 0 { + return DashboardUsageEventMultiError(errors) + } + + return nil +} + +// DashboardUsageEventMultiError is an error wrapping multiple validation +// errors returned by DashboardUsageEvent.ValidateAll() if the designated +// constraints aren't met. +type DashboardUsageEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DashboardUsageEventMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DashboardUsageEventMultiError) AllErrors() []error { return m } + +// DashboardUsageEventValidationError is the validation error returned by +// DashboardUsageEvent.Validate if the designated constraints aren't met. +type DashboardUsageEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DashboardUsageEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e DashboardUsageEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DashboardUsageEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DashboardUsageEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DashboardUsageEventValidationError) ErrorName() string { + return "DashboardUsageEventValidationError" +} + +// Error satisfies the builtin error interface +func (e DashboardUsageEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDashboardUsageEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DashboardUsageEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DashboardUsageEventValidationError{} + +// Validate checks the field values on UserFlowEvent with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *UserFlowEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UserFlowEvent with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in UserFlowEventMultiError, or +// nil if none found. 
+func (m *UserFlowEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *UserFlowEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for FlowId + + // no validation rules for StoryId + + // no validation rules for Event + + // no validation rules for Params + + if len(errors) > 0 { + return UserFlowEventMultiError(errors) + } + + return nil +} + +// UserFlowEventMultiError is an error wrapping multiple validation errors +// returned by UserFlowEvent.ValidateAll() if the designated constraints +// aren't met. +type UserFlowEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UserFlowEventMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UserFlowEventMultiError) AllErrors() []error { return m } + +// UserFlowEventValidationError is the validation error returned by +// UserFlowEvent.Validate if the designated constraints aren't met. +type UserFlowEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UserFlowEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UserFlowEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UserFlowEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UserFlowEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e UserFlowEventValidationError) ErrorName() string { return "UserFlowEventValidationError" } + +// Error satisfies the builtin error interface +func (e UserFlowEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUserFlowEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UserFlowEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UserFlowEventValidationError{} + +// Validate checks the field values on StoreRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StoreRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StoreRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StoreRequestMultiError, or +// nil if none found. 
+func (m *StoreRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *StoreRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetNotifications() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("Notifications[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("Notifications[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StoreRequestValidationError{ + field: fmt.Sprintf("Notifications[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetFetching() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("Fetching[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("Fetching[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StoreRequestValidationError{ + field: fmt.Sprintf("Fetching[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + 
+ for idx, item := range m.GetDashboardUsage() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("DashboardUsage[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("DashboardUsage[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StoreRequestValidationError{ + field: fmt.Sprintf("DashboardUsage[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetUserFlowEvents() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("UserFlowEvents[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, StoreRequestValidationError{ + field: fmt.Sprintf("UserFlowEvents[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return StoreRequestValidationError{ + field: fmt.Sprintf("UserFlowEvents[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return StoreRequestMultiError(errors) + } + + return nil +} + +// StoreRequestMultiError is an error wrapping multiple validation 
errors +// returned by StoreRequest.ValidateAll() if the designated constraints aren't met. +type StoreRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StoreRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StoreRequestMultiError) AllErrors() []error { return m } + +// StoreRequestValidationError is the validation error returned by +// StoreRequest.Validate if the designated constraints aren't met. +type StoreRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StoreRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StoreRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StoreRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e StoreRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StoreRequestValidationError) ErrorName() string { return "StoreRequestValidationError" } + +// Error satisfies the builtin error interface +func (e StoreRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStoreRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StoreRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StoreRequestValidationError{} + +// Validate checks the field values on StoreResponse with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *StoreResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on StoreResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in StoreResponseMultiError, or +// nil if none found. +func (m *StoreResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *StoreResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return StoreResponseMultiError(errors) + } + + return nil +} + +// StoreResponseMultiError is an error wrapping multiple validation errors +// returned by StoreResponse.ValidateAll() if the designated constraints +// aren't met. +type StoreResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m StoreResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m StoreResponseMultiError) AllErrors() []error { return m } + +// StoreResponseValidationError is the validation error returned by +// StoreResponse.Validate if the designated constraints aren't met. +type StoreResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e StoreResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e StoreResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e StoreResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e StoreResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e StoreResponseValidationError) ErrorName() string { return "StoreResponseValidationError" } + +// Error satisfies the builtin error interface +func (e StoreResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sStoreResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = StoreResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = StoreResponseValidationError{} diff --git a/build/Makefile b/build/Makefile index c32cc33c26..8f46fb48e6 100644 --- a/build/Makefile +++ b/build/Makefile @@ -1,19 +1,37 @@ export PACKER_CACHE_DIR := .cache export PACKER_VERSION := 1.8.2 export CENTOS_ISO := 2004.01 +export ORACLE_VERSION := 4.2.6 # if pulled from vagrant repository (see below) ## ----------------- PACKER ------------------ fetch: mkdir -p ${PACKER_CACHE_DIR}/${CENTOS_ISO} || : test -f ${PACKER_CACHE_DIR}/id_rsa_vagrant \ - || curl -L https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant \ + || curl -L https://raw.githubusercontent.com/hashicorp/vagrant/master/keys/vagrant \ -o ${PACKER_CACHE_DIR}/id_rsa_vagrant chmod 600 ${PACKER_CACHE_DIR}/id_rsa_vagrant test -f ${PACKER_CACHE_DIR}/${CENTOS_ISO}/CentOS7.ova \ || wget --progress=dot:giga https://app.vagrantup.com/centos/boxes/7/versions/${CENTOS_ISO}/providers/virtualbox.box \ -O ${PACKER_CACHE_DIR}/${CENTOS_ISO}/CentOS7.ova test -f ${PACKER_CACHE_DIR}/${CENTOS_ISO}/box.ovf \ - || tar -C ${PACKER_CACHE_DIR}/${CENTOS_ISO} -xf ${PACKER_CACHE_DIR}/${CENTOS_ISO}/CentOS7.ova + || tar -C ${PACKER_CACHE_DIR}/${CENTOS_ISO} -xvf ${PACKER_CACHE_DIR}/${CENTOS_ISO}/CentOS7.ova + +fetch-el9: + mkdir -p ${PACKER_CACHE_DIR}/box || : + test -f 
${PACKER_CACHE_DIR}/id_rsa_vagrant \ + || curl -L https://raw.githubusercontent.com/hashicorp/vagrant/master/keys/vagrant \ + -o ${PACKER_CACHE_DIR}/id_rsa_vagrant + chmod 600 ${PACKER_CACHE_DIR}/id_rsa_vagrant + test -f ${PACKER_CACHE_DIR}/box/oracle9.ova \ + || wget --progress=dot:giga https://vagrantcloud.com/bento/boxes/oracle-9.0/versions/202207.20.0/providers/virtualbox.box -O ${PACKER_CACHE_DIR}/box/oracle9.ova + #|| wget --progress=dot:giga https://yum.oracle.com/boxes/oraclelinux/ol9/OL9U1_x86_64-vagrant-virtualbox-b407.box -O ${PACKER_CACHE_DIR}/box/oracle9.ova + # https://app.vagrantup.com/generic/boxes/oracle9/versions/${ORACLE_VERSION}/providers/virtualbox.box + # https://yum.oracle.com/boxes/oraclelinux/ol9/OL9U1_x86_64-vagrant-virtualbox-b389.box + # https://yum.oracle.com/boxes/oraclelinux/ol8/OL8U7_x86_64-vagrant-virtualbox-b377.box + + # NOTE: image from vagrant registry is twice as large + test -f ${PACKER_CACHE_DIR}/box/box.ovf \ + || tar -C ${PACKER_CACHE_DIR}/box -xvf ${PACKER_CACHE_DIR}/box/oracle9.ova deps: mkdir -p ${PACKER_CACHE_DIR} ~/bin || : @@ -23,6 +41,23 @@ deps: pmm2-ovf: fetch packer build -only virtualbox-ovf packer/pmm2.json +pmm2-ovf-el9-rc: fetch-el9 + sed -i 's|become_method: su|become_method: sudo|g' update/tasks/roles/postgres/tasks/main.yml && \ + /usr/bin/packer build \ + -var 'pmm_client_repos=original testing' \ + -var 'pmm_client_repo_name=percona-testing-x86_64' \ + -var 'pmm2_server_repo=testing' \ + -only virtualbox-ovf -color=false packer/pmm2.el9.json \ + | tee build.log + +pmm2-ovf-el9-dev-latest: fetch-el9 + /usr/bin/packer build \ + -var 'pmm_client_repos=original experimental' \ + -var 'pmm_client_repo_name=percona-experimental-x86_64' \ + -var 'pmm2_server_repo=experimental' \ + -only virtualbox-ovf -color=false packer/pmm2.el9.json \ + | tee build.log + pmm2-digitalocean: packer build -only digitalocean -var 'single_disk=true' packer/pmm2.json @@ -32,17 +67,36 @@ pmm2-azure: pmm2-ami: docker run --rm -v 
${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \ build -var 'pmm_client_repos=original experimental' \ - -var 'pmm_client_repo_name=percona-experimental-x86_64' \ - -var 'pmm2_server_repo=experimental' \ - -only amazon-ebs -color=false \ - packer/pmm2.json + -var 'pmm_client_repo_name=percona-experimental-x86_64' \ + -var 'pmm2_server_repo=experimental' \ + -only amazon-ebs -color=false \ + packer/pmm2.json pmm2-ami-rc: docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \ build -var 'pmm_client_repos=original testing' \ - -var 'pmm_client_repo_name=percona-testing-x86_64' \ - -var 'pmm2_server_repo=testing' \ - -only amazon-ebs '-color=false' \ - packer/pmm2.json + -var 'pmm_client_repo_name=percona-testing-x86_64' \ + -var 'pmm2_server_repo=testing' \ + -only amazon-ebs '-color=false' \ + packer/pmm2.json + +pmm2-ami-el9: + docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \ + build -var 'pmm_client_repos=original experimental' \ + -var 'pmm_client_repo_name=percona-experimental-x86_64' \ + -var 'pmm2_server_repo=experimental' \ + -only amazon-ebs -color=false \ + packer/pmm2.el9.json + +pmm2-ami-el9-rc: + mkdir -p update && \ + cp -r ../update/ansible/playbook/* update/ && \ + sed -i 's|become_method: su|become_method: sudo|g' update/tasks/roles/postgres/tasks/main.yml && \ + docker run --rm -v ${HOME}/.aws:/root/.aws -v `pwd`:/build -w /build hashicorp/packer:${PACKER_VERSION} \ + build -var 'pmm_client_repos=original testing' \ + -var 'pmm_client_repo_name=percona-testing-x86_64' \ + -var 'pmm2_server_repo=testing' \ + -only amazon-ebs '-color=false' \ + packer/pmm2.el9.json ## ----------------- PACKER ------------------ diff --git a/build/ansible/pmm2/post-build-actions.yml b/build/ansible/pmm2/post-build-actions.yml index 19ee9a0202..7268cb6f59 100644 --- a/build/ansible/pmm2/post-build-actions.yml +++ 
b/build/ansible/pmm2/post-build-actions.yml @@ -53,20 +53,37 @@ --server-address=127.0.0.1:443 --server-insecure-tls - - name: Reread supervisord configuration + - name: Reread supervisord configuration EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl reread register: reread_result changed_when: "'No config updates to processes' not in reread_result.stdout" + - name: Reread supervisord configuration EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + command: /usr/local/bin/supervisorctl reread + register: reread_result + changed_when: "'No config updates to processes' not in reread_result.stdout" + - name: See what services are running debug: var=reread_result.stdout_lines - - name: Stop pmm-managed before deleting the database + - name: Stop pmm-managed before deleting the database EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' supervisorctl: name: pmm-managed state: stopped ignore_errors: True + - name: Stop pmm-managed before deleting the database EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + supervisorctl: + name: pmm-managed + state: stopped + supervisorctl_path: /usr/local/bin/supervisorctl + # become: true + # ignore_errors: True + - name: Supervisord stop | Stop supervisord service for AMI/OVF when: ansible_virtualization_type != "docker" service: name=supervisord state=stopped enabled=yes @@ -79,10 +96,18 @@ # so a temporary solution was to start it without supervisord and remove the # pmm-managed database/role. However, a complete overhaul of the pipeline is # necessary for a permanent fix. 
+ - name: Stop PostgreSQL database without supervisord + command: /usr/pgsql-14/bin/pg_ctl stop -D /srv/postgres14 + become: yes + become_user: postgres + ignore_errors: yes + when: ansible_virtualization_type != "docker" + - name: Start PostgreSQL database without supervisord command: /usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14 become: yes become_user: postgres + ignore_errors: yes when: ansible_virtualization_type != "docker" - name: Remove pmm-managed database @@ -97,6 +122,7 @@ msg: "pmm-managed database was removed" when: db_check_result.changed == True + - name: Remove pmm-managed role from postgres postgresql_user: name: pmm-managed @@ -106,7 +132,20 @@ command: /usr/pgsql-14/bin/pg_ctl stop -D /srv/postgres14 become: yes become_user: postgres + ignore_errors: yes + when: ansible_virtualization_type != "docker" + + - name: Supervisord stop | Stop supervisord service for AMI/OVF when: ansible_virtualization_type != "docker" + service: name=supervisord state=stopped enabled=yes + + - name: Supervisord stop EL7 | Stop supervisord service for docker + when: ansible_virtualization_type == "docker" and ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + shell: supervisorctl shutdown + + - name: Supervisord stop EL9 | Stop supervisord service for docker + when: ansible_virtualization_type == "docker" and (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + shell: /usr/local/bin/supervisorctl shutdown - name: Cleanup yum cache | Cleanup yum cache command: yum clean all diff --git a/build/ansible/roles/ami-ovf/files/show-pmm-url b/build/ansible/roles/ami-ovf/files/show-pmm-url index b5cc1b864b..d97f8960ec 100644 --- a/build/ansible/roles/ami-ovf/files/show-pmm-url +++ b/build/ansible/roles/ami-ovf/files/show-pmm-url @@ -9,7 +9,7 @@ SOURCE=$( | python -c 'import json, sys; print json.load(sys.stdin)["v1"]["datasource"];' 2>/dev/null ) -IP=$(ip route get 1 
2>/dev/null | awk '{print $NF;exit}') +IP=$(ip route get 1 2>/dev/null | awk '{print $7;exit}') if [ "x$SOURCE" = "xDataSourceEc2" ]; then IP=$(curl --connect-timeout 5 -s http://169.254.169.254/latest/meta-data/public-ipv4) fi diff --git a/build/ansible/roles/ami-ovf/tasks/main.yml b/build/ansible/roles/ami-ovf/tasks/main.yml index ac196049da..c7affc1f58 100644 --- a/build/ansible/roles/ami-ovf/tasks/main.yml +++ b/build/ansible/roles/ami-ovf/tasks/main.yml @@ -1,46 +1,58 @@ --- - - name: Packages | Remove PMM2 Testing Server repository - yum_repository: - name: pmm2-server - state: absent - - - name: Packages | Clean up yum metadata - command: yum clean metadata - args: - warn: no - - - name: Packages | Add PMM2 release Server YUM repository - yum_repository: - name: pmm2-server - description: PMM Server YUM repository - x86_64 - baseurl: https://repo.percona.com/pmm2-components/yum/release/7/RPMS/x86_64/ - gpgcheck: yes - enabled: yes - gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY - - - name: Disable SELinux - selinux: - policy: targeted - state: permissive - - - name: PMM | Add firewalld rule - firewalld: port={{ item }} permanent=true state=enabled immediate=yes - with_items: - - 80/tcp - - 443/tcp - - - name: PMM | Add script which show PMM URL - copy: - src: show-pmm-url - dest: /var/lib/cloud/scripts/per-boot/show-pmm-url - mode: 0755 - - - name: PMM | Delete centos - shell: cd /tmp; nohup sh -c "trap 'userdel -r centos' EXIT; sleep 600" /dev/null 2>&1 & - - - name: PMM | Delete vagrant - shell: cd /tmp; nohup sh -c "trap 'userdel -r vagrant' EXIT; sleep 600" /dev/null 2>&1 & - - - name: PMM | Delete Azure user - shell: cd /tmp; nohup sh -c "trap '/usr/sbin/waagent -force -deprovision+user && sync' EXIT; sleep 600" /dev/null 2>&1 & - +- name: Packages | Remove PMM2 Server testing repository + yum_repository: + name: pmm2-server + state: absent + +- name: Packages | Clean up yum metadata + command: yum clean metadata + +- name: Packages | Add PMM2 
Server release repository for EL7 + when: + - ansible_distribution == 'CentOS' + - ansible_distribution_major_version == '7' + yum_repository: + name: pmm2-server + description: PMM Server YUM repository - x86_64 + baseurl: https://repo.percona.com/pmm2-components/yum/release/7/RPMS/x86_64/ + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + +- name: Packages | Add PMM2 Server release repository for EL9 + when: + - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux' + - ansible_distribution_major_version == '9' + yum_repository: + name: pmm2-server + description: PMM Server YUM repository - x86_64 + baseurl: https://repo.percona.com/pmm2-components/yum/release/9/RPMS/x86_64/ + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + +- name: Disable SELinux + selinux: + policy: targeted + state: permissive + +- name: PMM | Add firewalld rule + firewalld: port={{ item }} permanent=true state=enabled immediate=yes + with_items: + - 80/tcp + - 443/tcp + +- name: PMM | Add script which show PMM URL + copy: + src: show-pmm-url + dest: /var/lib/cloud/scripts/per-boot/show-pmm-url + mode: 0755 + +- name: PMM | Delete centos + shell: cd /tmp; nohup sh -c "trap 'userdel -r centos' EXIT; sleep 600" /dev/null 2>&1 & + +- name: PMM | Delete vagrant + shell: cd /tmp; nohup sh -c "trap 'userdel -r vagrant' EXIT; sleep 600" /dev/null 2>&1 & + +- name: PMM | Delete Azure user + shell: cd /tmp; nohup sh -c "trap '/usr/sbin/waagent -force -deprovision+user && sync' EXIT; sleep 600" /dev/null 2>&1 & diff --git a/build/ansible/roles/cloud-node/files/show-url b/build/ansible/roles/cloud-node/files/show-url index 9733690ee6..a72488a9b0 100644 --- a/build/ansible/roles/cloud-node/files/show-url +++ b/build/ansible/roles/cloud-node/files/show-url @@ -1,13 +1,16 @@ #!/bin/sh PATH=/bin:/sbin +SOURCE= -SOURCE=$( - cat /var/lib/cloud/data/status.json 2>/dev/null \ - | python -c 'import json, sys; print 
json.load(sys.stdin)["v1"]["datasource"];' 2>/dev/null -) +if [ -f /var/lib/cloud/data/status.json ]; then + SOURCE=$( + cat /var/lib/cloud/data/status.json 2>/dev/null \ + | python -c 'import json, sys; print json.load(sys.stdin)["v1"]["datasource"];' 2>/dev/null + ) +fi -IP=$(ip route get 1 2>/dev/null | awk '{print $NF;exit}') +IP=$(ip route get 1 2>/dev/null | awk '{print $7;exit}') if [ "x$SOURCE" = "xDataSourceEc2" ]; then IP=$(curl --connect-timeout 5 -s http://169.254.169.254/latest/meta-data/public-ipv4) fi diff --git a/build/ansible/roles/cloud-node/tasks/main.yml b/build/ansible/roles/cloud-node/tasks/main.yml index dbb871b4e6..2a0305fc36 100644 --- a/build/ansible/roles/cloud-node/tasks/main.yml +++ b/build/ansible/roles/cloud-node/tasks/main.yml @@ -1,148 +1,183 @@ --- # Common things for all cloud images - - name: Packages | Add EPEL repository - when: ansible_os_family == 'RedHat' - yum: - name: https://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/e/epel-release-7-14.noarch.rpm - state: installed - - - name: Packages | Install OS tools - when: ansible_os_family == 'RedHat' - yum: - name: - - screen - - yum-utils - - cloud-init - - firewalld - - python2-pip - - ansible - - - name: Firewalld | Start - when: ansible_os_family == 'RedHat' - service: name=firewalld state=started enabled=yes - - - name: cleanup cache | Cleanup cache - file: path={{ item }} state=absent - with_items: - - /var/lib/cloud/sem - - /var/lib/cloud/data - - /var/lib/cloud/instance - - /var/lib/cloud/instances - - /var/log/cloud-init.log - - /var/log/cloud-init-output.log - - - name: create dir | Create getty@.service.d directory - file: - path: /etc/systemd/system/getty@.service.d - state: directory - - - name: cloud-init | Disable console cleanup - copy: - content: | - [Service] - TTYVTDisallocate=no - dest: /etc/systemd/system/getty@.service.d/nodisallocate.conf - mode: 0644 - - - name: root password | Set root password - when: ansible_virtualization_type == "virtualbox" - 
user: - name: root - password: "$6$J7pGg2a7vuRTbTV5$vORqkiAKdkyomU3iYwr/SPn.yLIkGsl5ludEx5DUvGVASSTquTjOldHt/nUWrFRnJeZyzt6CIOjAcugbcfGtN1" - - - name: root password | Set root password - when: ansible_virtualization_type == "virtualbox" - command: chage -d 0 root - changed_when: False - - - name: root password | Disable root password - when: ansible_virtualization_type != "virtualbox" - command: passwd --delete root - changed_when: False - - - name: root password | Disable root password - when: ansible_virtualization_type != "virtualbox" - command: passwd --lock root - changed_when: False - - - name: chronyd | Fix start-up sequence - replace: - dest: /usr/lib/systemd/system/chronyd.service - regexp: 'After=' - replace: 'Before=cloud-config.target\nAfter=network-online.target ' - - - name: disable root user | Disable root user - copy: - content: | - no_ssh_fingerprints: true - disable_root: true - dest: /etc/cloud/cloud.cfg.d/00_disable-root.cfg - mode: 0644 - - - name: add user | Add admin user - when: create_admin == "true" - user: - name: admin - comment: Cloud User - groups: wheel,adm,systemd-journal - shell: /bin/bash - - - name: add user | Add sudo for admin user - when: create_admin == "true" - copy: - content: | - admin ALL=(ALL) NOPASSWD: ALL - dest: /etc/sudoers.d/90-admin-user - mode: 0440 - - - name: change cloud user | Change cloud user - when: create_admin == "true" - replace: - dest: /etc/cloud/cloud.cfg - regexp: 'name: centos' - replace: 'name: admin' - - - name: cloud-init configuration | stat /etc/waagent.conf - stat: path=/etc/waagent.conf - register: waagent_conf - - - name: cloud-init configuration | Enable cloud-init for Azure - when: waagent_conf.stat.exists - replace: - dest: /etc/waagent.conf - regexp: 'Provisioning.UseCloudInit=n' - replace: 'Provisioning.UseCloudInit=y' - - replace: - dest: /usr/lib/python2.7/site-packages/azurelinuxagent/pa/deprovision/default.py - regexp: 'warnings, actions, deluser=deluser' - replace: 'warnings, 
actions, include_once=False, deluser=deluser' - - - name: PMM URL file | Add script which show PMM URL - copy: - src: show-url - dest: /opt/show-url - mode: 0755 - - - name: PMM URL Service | Add Service for script which show PMM URL - copy: - src: banner.service - dest: /etc/systemd/system/banner.service - mode: 0755 - - - name: Enable PMM URL Service | Enable PMM URL Service - systemd: - name: banner - state: started - enabled: yes - - - name: PMM IP in Log | Add PMM IP in Log file - lineinfile: - line: 'IP: \4' - path: /etc/issue - create: yes - - - import_tasks: security.yml - - import_tasks: ovf.yml - - import_tasks: ami.yml - +- name: Packages | Add EPEL repository for EL7 + when: + - ansible_distribution == 'CentOS' + - ansible_distribution_major_version == '7' + yum: + name: https://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/e/epel-release-7-14.noarch.rpm + state: installed + +- name: Packages | Add EPEL repository for EL9 + when: + - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux' + - ansible_distribution_major_version == '9' + yum: + name: epel-release + state: installed + +- name: Packages | Install OS tools for EL7 + when: + - ansible_distribution == 'CentOS' + - ansible_distribution_major_version == '7' + yum: + name: + - screen + - yum-utils + - cloud-init + - firewalld + - python2-pip + - ansible + +- name: Packages | Install OS tools for EL9 + when: + - (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + yum: + name: + - screen + - yum-utils + - cloud-init + - firewalld + - python3-pip + - ansible + +- name: Firewalld | Start + when: ansible_os_family == 'RedHat' + service: + name: firewalld + state: started + enabled: yes + +- name: cleanup cache | Cleanup cache + file: path={{ item }} state=absent + with_items: + - /var/lib/cloud/sem + - /var/lib/cloud/data + - /var/lib/cloud/instance + - /var/lib/cloud/instances + - /var/log/cloud-init.log 
+ - /var/log/cloud-init-output.log + +- name: create dir | Create getty@.service.d directory + file: + path: /etc/systemd/system/getty@.service.d + state: directory + +- name: cloud-init | Disable console cleanup + copy: + content: | + [Service] + TTYVTDisallocate=no + dest: /etc/systemd/system/getty@.service.d/nodisallocate.conf + mode: 0644 + +- name: root password | Set root password + when: ansible_virtualization_type == "virtualbox" + user: + name: root + password: "$6$J7pGg2a7vuRTbTV5$vORqkiAKdkyomU3iYwr/SPn.yLIkGsl5ludEx5DUvGVASSTquTjOldHt/nUWrFRnJeZyzt6CIOjAcugbcfGtN1" + +- name: root password | Set root password + when: ansible_virtualization_type == "virtualbox" + command: chage -d 0 root + changed_when: False + +- name: root password | Disable root password + when: ansible_virtualization_type != "virtualbox" + command: passwd --delete root + changed_when: False + +- name: root password | Disable root password + when: ansible_virtualization_type != "virtualbox" + command: passwd --lock root + changed_when: False + +- name: chronyd | Fix start-up sequence + replace: + dest: /usr/lib/systemd/system/chronyd.service + regexp: "After=" + replace: 'Before=cloud-config.target\nAfter=network-online.target ' + +- name: disable root user | Disable root user + copy: + content: | + no_ssh_fingerprints: true + disable_root: true + dest: /etc/cloud/cloud.cfg.d/00_disable-root.cfg + mode: 0644 + +- name: add user | Add admin user + when: create_admin == "true" + user: + name: admin + comment: Cloud User + groups: wheel,adm,systemd-journal + shell: /bin/bash + +- name: add user | Add sudo for admin user + when: create_admin == "true" + copy: + content: | + admin ALL=(ALL) NOPASSWD: ALL + dest: /etc/sudoers.d/90-admin-user + mode: 0440 + +- name: change cloud user EL7 | Change cloud user + when: create_admin == "true" and ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + replace: + dest: /etc/cloud/cloud.cfg + regexp: "name: centos" + 
replace: "name: admin" + +- name: change cloud user EL9 | Change cloud user + when: create_admin == "true" and (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + replace: + dest: /etc/cloud/cloud.cfg + regexp: "name: ec2-user" + replace: "name: admin" + +- name: cloud-init configuration | stat /etc/waagent.conf + stat: path=/etc/waagent.conf + register: waagent_conf + +- name: cloud-init configuration | Enable cloud-init for Azure + when: waagent_conf.stat.exists + replace: + dest: /etc/waagent.conf + regexp: "Provisioning.UseCloudInit=n" + replace: "Provisioning.UseCloudInit=y" + +- name: Azure tweaks + when: waagent_conf.stat.exists + replace: + dest: /usr/lib/python2.7/site-packages/azurelinuxagent/pa/deprovision/default.py + regexp: "warnings, actions, deluser=deluser" + replace: "warnings, actions, include_once=False, deluser=deluser" + +- name: PMM URL file | Add script which show PMM URL + copy: + src: show-url + dest: /opt/show-url + mode: 0755 + +- name: PMM URL Service | Add Service for script which show PMM URL + copy: + src: banner.service + dest: /etc/systemd/system/banner.service + mode: 0755 + +- name: Enable PMM URL Service | Enable PMM URL Service + systemd: + name: banner + state: started + enabled: yes + +- name: PMM IP in Log | Add PMM IP in Log file + lineinfile: + line: 'IP: \4' + path: /etc/issue + create: yes + +- import_tasks: security.yml +- import_tasks: ovf.yml +- import_tasks: ami.yml diff --git a/build/ansible/roles/cloud-node/tasks/ovf.yml b/build/ansible/roles/cloud-node/tasks/ovf.yml index 160ce1e860..db520b2959 100644 --- a/build/ansible/roles/cloud-node/tasks/ovf.yml +++ b/build/ansible/roles/cloud-node/tasks/ovf.yml @@ -1,15 +1,15 @@ --- # Common things for all OVF images - - name: ovf | Disable EC2, CloudStack - when: ansible_virtualization_type == "virtualbox" - copy: - content: | - datasource_list: [ NoCloud, ConfigDrive, OpenNebula, DigitalOcean, 
Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, None ] - disable_ec2_metadata: true - datasource: - OpenStack: - max_wait: 6 - timeout: 3 - retries: 2 - dest: /etc/cloud/cloud.cfg.d/90_disable-cloud.cfg - mode: 0644 +- name: ovf | Disable EC2, CloudStack + when: ansible_virtualization_type == "virtualbox" + copy: + content: | + datasource_list: [ NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, None ] + disable_ec2_metadata: true + datasource: + OpenStack: + max_wait: 6 + timeout: 3 + retries: 2 + dest: /etc/cloud/cloud.cfg.d/90_disable-cloud.cfg + mode: 0644 diff --git a/build/ansible/roles/lvm-init/tasks/main.yml b/build/ansible/roles/lvm-init/tasks/main.yml index 064348f422..269e0b5f9d 100644 --- a/build/ansible/roles/lvm-init/tasks/main.yml +++ b/build/ansible/roles/lvm-init/tasks/main.yml @@ -1,105 +1,115 @@ --- - - name: Packages | Install OS tools - yum: name=lvm2 state=installed - - - block: - - name: Data partition | list - shell: ls /dev/sda /dev/sdb /dev/sdc /dev/xvdb | grep -v ^$(pvdisplay -c | grep ':VolGroup00:' | cut -d ':' -f 1 | tr -d '[:space:]' | sed 's/[0-9]$//')$ | grep -v ^$(findmnt -f -n -o SOURCE / | sed 's/[0-9]$//')$ | grep -v ^$(findmnt -f -n -o SOURCE /mnt/resource | sed 's/[0-9]$//')$ - register: available_drives - failed_when: available_drives.stdout_lines|length != 1 - changed_when: False - - - name: Data partition | Create Volume Group - when: enable_lvm == "true" - lvg: - vg: DataVG - pvs: "{{ available_drives.stdout_lines[0] }}" - - - name: Data partition | Create Thin Pool - when: enable_lvm == "true" - register: thin_pool - failed_when: "thin_pool is failed and 'Sorry, no shrinking of DataLV to 0 permitted' not in thin_pool.msg" - lvol: - lv: DataLV - vg: DataVG - size: 100%FREE - opts: --thinpool ThinPool -V 1G - - - name: Data partition | Format LVM - when: enable_lvm == "true" - filesystem: - fstype: xfs - dev: /dev/DataVG/DataLV - opts: -L DATA - 
- - name: Data partition | Format Device - when: enable_lvm != "true" - filesystem: - fstype: xfs - dev: "{{ available_drives.stdout_lines[0] }}" - opts: -L DATA - - - name: Data partition | Mount - mount: - name: "{{ data_partition }}" - src: LABEL=DATA - fstype: xfs - opts: defaults,nofail - state: mounted - when: single_disk == "false" - - - name: System Log | stat /boot/grub/grub.conf - stat: path=/boot/grub/grub.conf - register: grub1_conf - - - name: System Log | change /boot/grub/grub.conf - when: grub1_conf.stat.exists - replace: - dest: /boot/grub/grub.conf - regexp: 'console=hvc0' - replace: 'console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300 xen_emul_unplug=unnecessary' - - - name: System Log | stat /boot/grub2/grub.cfg - stat: path=/boot/grub2/grub.cfg - register: grub2_conf - - - name: System Log | change /etc/default/grub - when: grub2_conf.stat.exists - replace: - dest: /etc/default/grub - regexp: 'rhgb' - replace: 'console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300 xen_emul_unplug=unnecessary' - register: grub2_var - - - name: System Log | run grub2-mkconfig - when: grub2_var.changed and grub2_conf.stat.exists - command: grub2-mkconfig -o /boot/grub2/grub.cfg - - - name: Create dirs | Create dirs - file: path={{ item }} state=directory - with_items: - - /var/lib/cloud/scripts/per-once - - /var/lib/cloud/scripts/per-boot - - - name: Data partition | Auto resize LVM - when: enable_lvm == "true" and single_disk == "false" - template: - src: resize-xfs-lvm - dest: /var/lib/cloud/scripts/per-boot/resize-xfs - mode: 0755 - - - name: Data partition | Auto resize device +- name: Packages | Install OS tools + yum: + name: lvm2 + state: installed + +- block: + - name: Data partition | https://bugzilla.redhat.com/show_bug.cgi?id=1965941 + shell: sed -i 's/devices {/devices { \nuse_devicesfile=0/g' /etc/lvm/lvm.conf + ignore_errors: True + + - name: Data partition | list + shell: ls /dev/sda /dev/sdb /dev/sdc /dev/xvdb | grep -v 
^$(pvdisplay -c | grep ':VolGroup00:' | cut -d ':' -f 1 | tr -d '[:space:]' | sed 's/[0-9]$//')$ | grep -v ^$(findmnt -f -n -o SOURCE / | sed 's/[0-9]$//')$ | grep -v ^$(findmnt -f -n -o SOURCE /mnt/resource | sed 's/[0-9]$//')$ + register: available_drives + failed_when: available_drives.stdout_lines|length != 1 + changed_when: False + + - name: Data partition | Create Volume Group + when: enable_lvm == "true" + lvg: + vg: DataVG + pvs: "{{ available_drives.stdout_lines[0] }}" + ignore_errors: True + + - name: Data partition | Create Thin Pool + when: enable_lvm == "true" + register: thin_pool + failed_when: "thin_pool is failed and 'Sorry, no shrinking of DataLV to 0 permitted' not in thin_pool.msg" + lvol: + lv: DataLV + vg: DataVG + size: 100%FREE + opts: --thinpool ThinPool -V 1G + ignore_errors: True + + - name: Data partition | Format LVM + when: enable_lvm == "true" + filesystem: + fstype: xfs + dev: /dev/DataVG/DataLV + opts: -L DATA + ignore_errors: True + + - name: Data partition | Format Device when: enable_lvm != "true" - template: - src: resize-xfs-no-lvm - dest: /var/lib/cloud/scripts/per-boot/resize-xfs - mode: 0755 - - - name: Cron tasks | Add resize task to cron - cron: - name: "resize data partition" - minute: "*/5" - user: root - job: "/var/lib/cloud/scripts/per-boot/resize-xfs" - cron_file: resizeXfs - + filesystem: + fstype: xfs + dev: "{{ available_drives.stdout_lines[0] }}" + opts: -L DATA + ignore_errors: True + + - name: Data partition | Mount + mount: + name: "{{ data_partition }}" + src: LABEL=DATA + fstype: xfs + opts: defaults,nofail + state: mounted + ignore_errors: True + when: single_disk == "false" + +- name: System Log | stat /boot/grub/grub.conf + stat: path=/boot/grub/grub.conf + register: grub1_conf + +- name: System Log | change /boot/grub/grub.conf + when: grub1_conf.stat.exists + replace: + dest: /boot/grub/grub.conf + regexp: "console=hvc0" + replace: "console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300 
xen_emul_unplug=unnecessary" + +- name: System Log | stat /boot/grub2/grub.cfg + stat: path=/boot/grub2/grub.cfg + register: grub2_conf + +- name: System Log | change /etc/default/grub + when: grub2_conf.stat.exists + replace: + dest: /etc/default/grub + regexp: "rhgb" + replace: "console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300 xen_emul_unplug=unnecessary" + register: grub2_var + +- name: System Log | run grub2-mkconfig + when: grub2_var.changed and grub2_conf.stat.exists + command: grub2-mkconfig -o /boot/grub2/grub.cfg + +- name: Create dirs | Create dirs + file: path={{ item }} state=directory + with_items: + - /var/lib/cloud/scripts/per-once + - /var/lib/cloud/scripts/per-boot + +- name: Data partition | Auto resize LVM + when: enable_lvm == "true" and single_disk == "false" + template: + src: resize-xfs-lvm + dest: /var/lib/cloud/scripts/per-boot/resize-xfs + mode: 0755 + +- name: Data partition | Auto resize device + when: enable_lvm != "true" + template: + src: resize-xfs-no-lvm + dest: /var/lib/cloud/scripts/per-boot/resize-xfs + mode: 0755 + +- name: Cron tasks | Add resize task to cron + cron: + name: "resize data partition" + minute: "*/5" + user: root + job: "/var/lib/cloud/scripts/per-boot/resize-xfs" + cron_file: resizeXfs diff --git a/build/ansible/roles/pmm2-images/tasks/main.yml b/build/ansible/roles/pmm2-images/tasks/main.yml index 4a076825ac..f7812a6328 100644 --- a/build/ansible/roles/pmm2-images/tasks/main.yml +++ b/build/ansible/roles/pmm2-images/tasks/main.yml @@ -1,140 +1,181 @@ --- - - name: Packages | Add PMM2 Server YUM repository - yum_repository: - name: pmm2-server - description: PMM Server YUM repository - x86_64 - baseurl: https://repo.percona.com/pmm2-components/yum/{{ pmm2_server_repo }}/7/RPMS/x86_64/ - gpgcheck: yes - enabled: yes - gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY - - # local yum repo for building of pmm server docker image in autobuild jobs - - name: PMM | Add local YUM repository - when: 
ansible_virtualization_type == "docker" - yum_repository: - name: local - description: Local YUM repository - x86_64 - baseurl: file:///tmp/RPMS - gpgcheck: no - enabled: no - - # we use it for pmm-client (TODO we'll need switch to pmm-client client repo) - - name: Packages | Install percona-release rpm - yum: - name: https://repo.percona.com/yum/percona-release-latest.noarch.rpm - state: installed - - - name: Packages | Update OS - yum: - name: '*' - state: latest - exclude: 'ansible' - disablerepo: percona-release-x86_64 - - - name: Packages | Install OS tools - yum: - name: - - python2-pip - - rsync - - - name: Create users for non-docker images | Create users +- name: Install the GPG key for repo.percona.com + rpm_key: + state: present + key: https://downloads.percona.com/downloads/RPM-GPG-KEY-percona + +- name: Packages | Add PMM2 Server YUM repository for EL7 + when: + - ansible_distribution == "CentOS" + - ansible_distribution_major_version == "7" + yum_repository: + name: pmm2-server + description: PMM Server YUM repository - x86_64 + baseurl: https://repo.percona.com/pmm2-components/yum/{{ pmm2_server_repo }}/7/RPMS/x86_64/ + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + +- name: Packages | Add PMM2 Server YUM repository for EL9 + when: + - (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + yum_repository: + name: pmm2-server + description: PMM Server YUM repository - x86_64 + baseurl: https://repo.percona.com/pmm2-components/yum/{{ pmm2_server_repo }}/9/RPMS/x86_64/ + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + +# local yum repo for building of pmm server docker image in autobuild jobs +- name: PMM | Add local YUM repository + when: ansible_virtualization_type == "docker" + yum_repository: + name: local + description: Local YUM repository - x86_64 + baseurl: file:///tmp/RPMS + gpgcheck: no + 
enabled: no + +# we use it for pmm-client (TODO we'll need switch to pmm-client client repo) +# To workaround the package's incompatibility with RHEL9, we have to ignore errors :( +# Error: `Failed to validate GPG signature for percona-release-1.0-27.noarch` +# Despite the error, this will still install the repo and the GPG key +- name: Packages | Install percona-release rpm + yum: + name: https://repo.percona.com/yum/percona-release-latest.noarch.rpm + state: installed + ignore_errors: True + +- name: Packages | Update OS + yum: + name: "*" + state: latest + exclude: "ansible*" + disablerepo: percona-release-x86_64 + +- name: Packages | Install OS tools for EL7 + when: + - ansible_distribution == "CentOS" + - ansible_distribution_major_version == "7" + yum: + name: + - python2-pip + - rsync + +- name: Packages | Install OS tools for EL9 + when: + - (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + yum: + name: + - python3-pip + - python3.11-pip + - python3.11 + - python3.11-psycopg2 + - rsync + - libsqlite3x-devel # package does not come pre-installed on EL9 + +- name: Create users for non-docker images | Create users + user: + name: "pmm" + when: ansible_virtualization_type != "docker" + +- name: Create users and groups in docker container + block: + - name: Ensure groups exist with correct gid + group: + name: "{{ item.name }}" + gid: "{{ item.gid }}" + non_unique: true + loop: + - { name: pmm, gid: 1000 } + - { name: nginx, gid: 999 } + - { name: grafana, gid: 998 } + - { name: clickhouse, gid: 997 } + - { name: pmm-agent, gid: 996 } + + - name: Create users | Create users user: - name: "pmm" - when: ansible_virtualization_type != "docker" - - - name: Create users and groups in docker container - block: - - name: Ensure groups exist with correct gid - group: - name: "{{ item.name }}" - gid: "{{ item.gid }}" - loop: - - { name: pmm, gid: 1000 } - - { name: nginx, gid: 998 } - - { name: 
grafana, gid: 996 } - - { name: clickhouse, gid: 995 } - - { name: pmm-agent, gid: 994 } - - - name: Create users | Create users - user: - name: "{{ item.name }}" - uid: "{{ item.uid }}" - home: "{{ item.home }}" - comment: "{{ item.comment }}" - shell: "{{ item.shell }}" - group: "{{ item.group }}" - loop: - - { name: pmm, uid: 1000, comment: "PMM Server", shell: "/bin/false", home: "/home/pmm", group: pmm } - - { name: nginx, uid: 999, comment: "nginx user", shell: "/sbin/nologin", home: "/var/cache/nginx", group: nginx } - - { name: grafana, uid: 998, comment: "Grafana Dashboard", shell: "/sbin/nologin", home: "/etc/grafana", group: grafana } - - { name: clickhouse, uid: 997, comment: "Clickhouse server", shell: "/sbin/nologin", home: "/var/lib/clickhouse", group: clickhouse } - - { name: pmm-agent, uid: 996, comment: "pmm-agent", shell: "/bin/false", home: "/usr/local/percona/", group: pmm-agent } - when: ansible_virtualization_type == "docker" - - - name: Create directories | Create dirs - file: path={{ item }} state=directory owner=pmm group=pmm - with_items: - - /srv/prometheus/data - - /srv/prometheus/rules - - /srv/alertmanager/data - - - name: Create directories | Create dirs - file: - path: /srv/logs - state: directory - owner: pmm - group: pmm - mode: '0775' - - - name: Create dirs | Create dirs - when: ansible_virtualization_type == "docker" - file: path={{ item }} state=directory - with_items: - - /var/lib/cloud/scripts/per-once - - /var/lib/cloud/scripts/per-boot - - - name: Install supervisord - include_role: - name: supervisord-init - - - name: Install RPMs | Install RPMs for PMM2 server - yum: - name: - - percona-grafana - - percona-victoriametrics - - percona-qan-api2 - - percona-dashboards - - percona-alertmanager - - pmm-managed - - pmm-update - - dbaas-controller - - dbaas-tools - - pmm-dump - - vmproxy - state: installed - # line below is sed'ed by build-server-docker script - enablerepo: "pmm2-server" - - - name: PMM | Enable repo for 
pmm2-client - command: percona-release enable {{ pmm_client_repos }} - - - name: Install RPMs | Install pmm2-client rpm - yum: - name: - - pmm2-client - state: installed - enablerepo: "{{ pmm_client_repo_name }}" - - - name: Disable pmm-agent service | Disable pmm-agent - when: ansible_virtualization_type != "docker" - service: name=pmm-agent state=stopped enabled=no - - - name: Create tmp dirs | Create tmp dirs - when: ansible_virtualization_type != "docker" - command: /usr/bin/systemd-tmpfiles --create --remove --boot --exclude-prefix=/dev - - - name: Copy grafana.ini file for the first run - copy: - src: grafana.ini - dest: /etc/supervisord.d/grafana.ini - mode: '0644' + name: "{{ item.name }}" + uid: "{{ item.uid }}" + home: "{{ item.home }}" + comment: "{{ item.comment }}" + shell: "{{ item.shell }}" + group: "{{ item.group }}" + non_unique: true + loop: + - { name: pmm, uid: 1000, comment: "PMM Server", shell: "/bin/false", home: "/home/pmm", group: pmm, } + - { name: nginx, uid: 999, comment: "nginx user", shell: "/sbin/nologin", home: "/var/cache/nginx", group: nginx, } + - { name: grafana, uid: 998, comment: "Grafana Dashboard", shell: "/sbin/nologin", home: "/etc/grafana", group: grafana, } + - { name: clickhouse, uid: 997, comment: "Clickhouse server", shell: "/sbin/nologin", home: "/var/lib/clickhouse", group: clickhouse, } + - { name: pmm-agent, uid: 996, comment: "pmm-agent", shell: "/bin/false", home: "/usr/local/percona/", group: pmm-agent, } + when: ansible_virtualization_type == "docker" + +- name: Create directories | Create dirs + file: path={{ item }} state=directory owner=pmm group=pmm + with_items: + - /srv/prometheus/data + - /srv/prometheus/rules + - /srv/alertmanager/data + +- name: Create directories | Create dirs + file: + path: /srv/logs + state: directory + owner: pmm + group: pmm + mode: "0775" + +- name: Create dirs | Create dirs + when: ansible_virtualization_type == "docker" + file: path={{ item }} state=directory + with_items: + - 
/var/lib/cloud/scripts/per-once + - /var/lib/cloud/scripts/per-boot + +- name: Install supervisord + include_role: + name: supervisord-init + +- name: Install RPMs | Install RPMs for PMM2 server + yum: + name: + - percona-grafana + - percona-victoriametrics + - percona-qan-api2 + - percona-dashboards + - percona-alertmanager + - pmm-managed + - pmm-update + - dbaas-controller + - dbaas-tools + - grafana-db-migrator + - pmm-dump + - vmproxy + state: installed + # line below is sed'ed by build-server-docker script + enablerepo: "pmm2-server" + +- name: PMM | Enable repo for pmm2-client + command: percona-release enable {{ pmm_client_repos }} + +- name: Install RPMs | Install pmm2-client rpm + yum: + name: + - pmm2-client + state: installed + enablerepo: "{{ pmm_client_repo_name }}" + +- name: Disable pmm-agent service | Disable pmm-agent + when: ansible_virtualization_type != "docker" + service: name=pmm-agent state=stopped enabled=no + +- name: Create tmp dirs | Create tmp dirs + when: ansible_virtualization_type != "docker" + command: /usr/bin/systemd-tmpfiles --create --remove --boot --exclude-prefix=/dev + +- name: Copy grafana.ini file for the first run + copy: + src: grafana.ini + dest: /etc/supervisord.d/grafana.ini + mode: "0644" diff --git a/build/ansible/roles/supervisord-init/tasks/main.yml b/build/ansible/roles/supervisord-init/tasks/main.yml index 82aa46bec7..9dd37bcb05 100644 --- a/build/ansible/roles/supervisord-init/tasks/main.yml +++ b/build/ansible/roles/supervisord-init/tasks/main.yml @@ -1,92 +1,115 @@ --- - - name: Install supervisor | Install supervisor 3.4.0 - pip: - name: supervisor==3.4.0 - - - name: Configure supervisor | Create a defult configuration file for supervisord - shell: echo_supervisord_conf > /etc/supervisord.conf - - - name: Configure supervisor | Modify supervisord.conf - ini_file: - dest: /etc/supervisord.conf - section: include - option: files - value: supervisord.d/*.ini - - - name: Install supervisor | Modify 
supervisord.conf - ini_file: - dest: /etc/supervisord.conf - section: unix_http_server - option: file - value: /var/run/supervisor/supervisor.sock - - - name: Configure supervisor | Modify supervisord.conf - ini_file: - dest: /etc/supervisord.conf - section: supervisord - option: logfile - value: "/srv/logs/supervisord.log" - - - name: Configure supervisor | Modify supervisord.conf - ini_file: - dest: /etc/supervisord.conf - section: supervisord - option: pidfile - value: /var/run/supervisord.pid - - - name: Configure supervisor | Modify supervisord.conf - ini_file: - dest: /etc/supervisord.conf - section: supervisorctl - option: serverurl - value: unix:///var/run/supervisor/supervisor.sock - - - name: Configure supervisor | Create dirs - file: - path: /var/run/supervisor - state: directory - mode: '0770' - - - name: Configure supervisor | Create /etc/supervisord.d dir - file: - path: /etc/supervisord.d - mode: '0755' - state: directory - - - name: Configure supervisor | Add /etc/tmpfiles.d/supervisor.conf config - when: ansible_virtualization_type != "docker" - copy: - content: | - D /var/run/supervisor 0775 root root - - dest: /etc/tmpfiles.d/supervisor.conf - mode: 0644 - - - name: Configure supervisor | Fix credentials - ini_file: - dest: /etc/supervisord.conf - section: supervisorctl - option: username - value: dummy - - - name: Configure supervisor | Fix credentials - ini_file: - dest: /etc/supervisord.conf - section: supervisorctl - option: password - value: dummy - - - name: Configure supervisor | Increase number of open files for jobs - when: ansible_virtualization_type != "docker" - ini_file: - dest: /etc/supervisord.conf - section: supervisord - option: minfds - value: "800000" - - - name: Configure supervisor | Add supervisord.service - when: ansible_virtualization_type != "docker" - copy: - src: supervisord.service - dest: /usr/lib/systemd/system/ - mode: 0644 +- name: Install supervisor | EL7 + when: ansible_distribution == 'CentOS' and 
ansible_distribution_major_version == '7' + pip: + name: supervisor==3.4.0 +- name: Install supervisor | EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + pip: + name: supervisor==4.2.4 + +- name: Fix supervisor EL9 | Fix supervisor + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + shell: if [ ! -e /usr/bin/supervisord ]; then ln -s /usr/local/bin/supervisord /usr/bin/supervisord; fi + +- name: Configure supervisor EL7 | Create a default configuration file for supervisord + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + shell: echo_supervisord_conf > /etc/supervisord.conf + +- name: Configure supervisor EL9 | Create a default configuration file for supervisord + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + shell: /usr/local/bin/echo_supervisord_conf > /etc/supervisord.conf + ignore_errors: yes + +- name: Configure supervisor | Modify supervisord.conf + ini_file: + dest: /etc/supervisord.conf + section: include + option: files + value: supervisord.d/*.ini + +- name: Install supervisor | Modify supervisord.conf + ini_file: + dest: /etc/supervisord.conf + section: unix_http_server + option: file + value: /var/run/supervisor/supervisor.sock + +- name: Configure supervisor | Modify supervisord.conf + ini_file: + dest: /etc/supervisord.conf + section: supervisord + option: logfile + value: "/srv/logs/supervisord.log" + +- name: Configure supervisor | Modify supervisord.conf + ini_file: + dest: /etc/supervisord.conf + section: supervisord + option: pidfile + value: /var/run/supervisord.pid + +- name: Configure supervisor | Modify supervisord.conf + ini_file: + dest: /etc/supervisord.conf + section: supervisorctl + option: serverurl + value: 
unix:///var/run/supervisor/supervisor.sock + +- name: Configure supervisor | Create dirs + file: + path: /var/run/supervisor + state: directory + mode: "0770" + +- name: Configure supervisor | Create /etc/supervisord.d dir + file: + path: /etc/supervisord.d + mode: "0755" + state: directory + +- name: Configure supervisor | Add /etc/tmpfiles.d/supervisor.conf config + when: ansible_virtualization_type != "docker" + copy: + content: | + D /var/run/supervisor 0775 root root - + dest: /etc/tmpfiles.d/supervisor.conf + mode: 0644 + +- name: Configure supervisor | Fix credentials + ini_file: + dest: /etc/supervisord.conf + section: supervisorctl + option: username + value: dummy + +- name: Configure supervisor | Fix credentials + ini_file: + dest: /etc/supervisord.conf + section: supervisorctl + option: password + value: dummy + +- name: Configure supervisor | Increase number of open files for jobs + when: ansible_virtualization_type != "docker" + ini_file: + dest: /etc/supervisord.conf + section: supervisord + option: minfds + value: "800000" + +- name: Configure supervisor | Add supervisord.service + when: ansible_virtualization_type != "docker" + copy: + src: supervisord.service + dest: /usr/lib/systemd/system/ + mode: 0644 + +- name: Fix motd | Fix motd + shell: echo "Welcome to PMM Server!" > /etc/motd; echo "Welcome to PMM Server!" 
> /etc/motd.conf + +- name: Debug | Print the contents of supervisord.conf + debug: + msg: + - "{{ lookup('file', '/etc/supervisord.conf') }}" diff --git a/build/docker/client/Dockerfile b/build/docker/client/Dockerfile index d3659c74ae..545e900218 100644 --- a/build/docker/client/Dockerfile +++ b/build/docker/client/Dockerfile @@ -12,12 +12,6 @@ FROM redhat/ubi9-micro ARG VERSION ARG BUILD_DATE -LABEL org.label-schema.build-date ${BUILD_DATE} -LABEL org.label-schema.license Apache-2.0 -LABEL org.label-schema.name Percona Monitoring and Management Client -LABEL org.label-schema.vendor Percona -LABEL org.label-schema.version ${VERSION} -LABEL org.label-schema.schema-version "1.0" LABEL org.opencontainers.image.created ${BUILD_DATE} LABEL org.opencontainers.image.licenses Apache-2.0 LABEL org.opencontainers.image.title Percona Monitoring and Management Client diff --git a/build/docker/rpmbuild/Dockerfile.el9 b/build/docker/rpmbuild/Dockerfile.el9 new file mode 100644 index 0000000000..987dbe9f39 --- /dev/null +++ b/build/docker/rpmbuild/Dockerfile.el9 @@ -0,0 +1,53 @@ +FROM oraclelinux:9-slim + +# enable nodesource repo for nodejs +RUN curl -sL https://rpm.nodesource.com/setup_16.x | bash - +RUN curl -sL https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo + +RUN yum update -y && \ + yum install -y --setopt=skip_missing_names_on_install=False \ + gcc gcc-c++ \ + nodejs \ + libtool libtool-ltdl \ + make cmake \ + git \ + pkgconfig \ + sudo \ + automake autoconf \ + rpmdevtools createrepo_c epel-release \ + bison yum-utils rpm-build \ + rsync \ + wget \ + yarn && \ + yum install -y --enablerepo=ol9_codeready_builder glibc-static && \ + yum remove -y nodesource-release-el9-1.noarch && \ + yum clean all && rm -rf /var/cache/yum + +# keep that format for easier search +ENV GO_VERSION 1.19.1 +ENV GO_RELEASER_VERSION 1.9.2 + +RUN if [ `uname -i` == "x86_64" ]; then ARCH=amd64; else ARCH=arm64; fi && \ + wget --progress=dot:giga 
https://dl.google.com/go/go${GO_VERSION}.linux-${ARCH}.tar.gz -O /tmp/golang.tar.gz && \ + wget --progress=dot:giga https://github.com/goreleaser/goreleaser/releases/download/v${GO_RELEASER_VERSION}/goreleaser-${GO_RELEASER_VERSION}.`uname -i`.rpm -O /tmp/goreleaser.rpm && \ + tar -C /usr/local -xzf /tmp/golang.tar.gz && \ + yum install -y /tmp/goreleaser.rpm && \ + rm /tmp/golang.tar.gz /tmp/goreleaser.rpm + +RUN update-alternatives --install "/usr/bin/go" "go" "/usr/local/go/bin/go" 0 +RUN update-alternatives --set go /usr/local/go/bin/go +RUN update-alternatives --install "/usr/bin/gofmt" "gofmt" "/usr/local/go/bin/gofmt" 0 +RUN update-alternatives --set gofmt /usr/local/go/bin/gofmt + +RUN useradd builder -u 1000 -m -G users,wheel && \ + echo "builder ALL=(ALL:ALL) NOPASSWD:ALL" >> /etc/sudoers && \ + echo "# macros" > /home/builder/.rpmmacros && \ + echo "%_topdir /home/builder/rpm" >> /home/builder/.rpmmacros && \ + mkdir /home/builder/rpm && \ + chmod 755 /home/builder && \ + chown -R builder:builder /home/builder + +USER builder + +ENV FLAVOR=rpmbuild OS=centos DIST=el9 +WORKDIR /home/builder/rpm diff --git a/build/docker/server/Dockerfile b/build/docker/server/Dockerfile index 3fbe78bc9c..d5c2a752f7 100644 --- a/build/docker/server/Dockerfile +++ b/build/docker/server/Dockerfile @@ -3,11 +3,6 @@ FROM centos:7 ARG VERSION ARG BUILD_DATE -LABEL org.label-schema.build-date ${BUILD_DATE} -LABEL org.label-schema.license AGPL-3.0 -LABEL org.label-schema.name Percona Monitoring and Management -LABEL org.label-schema.vendor Percona -LABEL org.label-schema.version ${VERSION} LABEL org.opencontainers.image.created ${BUILD_DATE} LABEL org.opencontainers.image.licenses AGPL-3.0 LABEL org.opencontainers.image.title Percona Monitoring and Management diff --git a/build/docker/server/Dockerfile.el9 b/build/docker/server/Dockerfile.el9 new file mode 100644 index 0000000000..e860488e7d --- /dev/null +++ b/build/docker/server/Dockerfile.el9 @@ -0,0 +1,36 @@ +FROM 
oraclelinux:9-slim + +ARG VERSION +ARG BUILD_DATE + +LABEL org.opencontainers.image.created ${BUILD_DATE} +LABEL org.opencontainers.image.licenses AGPL-3.0 +LABEL org.opencontainers.image.title Percona Monitoring and Management +LABEL org.opencontainers.image.vendor Percona LLC +LABEL org.opencontainers.image.version ${VERSION} + +EXPOSE 80 443 + +WORKDIR /opt + +# NOTE: Ansible should NOT be installed via yum/dnf +# Read more: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#pip-install +RUN microdnf -y install yum && yum -y install python3-pip && \ + yum -y install oracle-epel-release-el9 ansible-core && \ + python3 -m pip install ansible && \ + python3 -m pip install setuptools && \ + yum -y install epel-release + +COPY RPMS /tmp/RPMS +COPY gitCommit /tmp/gitCommit + +COPY ansible /opt/ansible +# NOTE: this needs to be refactored, since some of the playbooks are duplicates +RUN cp -r /opt/ansible/roles /opt/ansible/pmm2-docker/roles +RUN ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm2-docker/main.yml \ + && ansible-playbook -vvv -i 'localhost,' -c local /usr/share/pmm-update/ansible/playbook/tasks/update.yml \ + && ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm2/post-build-actions.yml + +COPY entrypoint.sh /opt/entrypoint.sh +HEALTHCHECK --interval=3s --timeout=2s --start-period=10s --retries=3 CMD curl -f http://127.0.0.1/v1/readyz || exit 1 +CMD ["/opt/entrypoint.sh"] diff --git a/build/docker/server/create_users.sh b/build/docker/server/create_users.sh new file mode 100644 index 0000000000..7cf342d344 --- /dev/null +++ b/build/docker/server/create_users.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +users=( + "pmm:1000:/bin/false:/home/pmm:pmm" + "nginx:999:/sbin/nologin:/var/cache/nginx:nginx" + "grafana:998:/sbin/nologin:/etc/grafana:grafana" + "clickhouse:997:/sbin/nologin:/var/lib/clickhouse:clickhouse" + "pmm-agent:996:/bin/false:/usr/local/percona/:pmm-agent" +) + +for user in "${users[@]}"; do + 
IFS=: read -r name uid shell home_dir group <<< "$user" + group_id="$uid" + + # Check if user already exists + if id "$name" >/dev/null 2>&1; then + echo "User $name already exists" + continue + fi + + # Create user with home directory if it doesn't exist + if [ ! -d "$home_dir" ]; then + mkdir -p "$home_dir" + fi + + # Create user with specified UID, GID, and shell + groupadd -o -g "$group_id" "$group" + useradd -o -u "$uid" -g "$group" -G "$group" -s "$shell" -d "$home_dir" -c "$name" -m "$name" + chown "$uid:$group_id" "$home_dir" + +done + diff --git a/build/docker/server/entrypoint.sh b/build/docker/server/entrypoint.sh index 3ec7ff8824..fceb3ccb1a 100755 --- a/build/docker/server/entrypoint.sh +++ b/build/docker/server/entrypoint.sh @@ -4,20 +4,20 @@ set -o errexit # init /srv if empty DIST_FILE=/srv/pmm-distribution if [ ! -f $DIST_FILE ]; then - echo "File $DIST_FILE doesn't exist. Initizlize /srv..." + echo "File $DIST_FILE doesn't exist. Initialize /srv..." echo docker > $DIST_FILE mkdir -p /srv/{clickhouse,grafana,logs,postgres14,prometheus,nginx,victoriametrics} - echo "Copy plugins and VERSION file" + echo "Copying plugins and VERSION file" cp /usr/share/percona-dashboards/VERSION /srv/grafana/PERCONA_DASHBOARDS_VERSION cp -r /usr/share/percona-dashboards/panels/ /srv/grafana/plugins chown -R grafana:grafana /srv/grafana chown pmm:pmm /srv/{victoriametrics,prometheus,logs} chown postgres:postgres /srv/postgres14 - echo "Generate self-signed certificates for nginx" + echo "Generating self-signed certificates for nginx" bash /var/lib/cloud/scripts/per-boot/generate-ssl-certificate - echo "Init Postgres" + echo "Initializing Postgres" su postgres -c "/usr/pgsql-14/bin/initdb -D /srv/postgres14" - echo "Temporary start postgres and enable pg_stat_statements" + echo "Enable pg_stat_statements extension" su postgres -c "/usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14" su postgres -c "psql postgres postgres -c 'CREATE EXTENSION pg_stat_statements SCHEMA 
public'" su postgres -c "/usr/pgsql-14/bin/pg_ctl stop -D /srv/postgres14" diff --git a/build/packages/rpm/client/pmm2-client.spec b/build/packages/rpm/client/pmm2-client.spec index 6a4b4c2d71..3fa8154ee9 100644 --- a/build/packages/rpm/client/pmm2-client.spec +++ b/build/packages/rpm/client/pmm2-client.spec @@ -1,7 +1,7 @@ %define debug_package %{nil} Name: pmm2-client -Summary: Percona Monitoring and Management Client +Summary: Percona Monitoring and Management Client (pmm-agent) Version: %{version} Release: %{release}%{?dist} Group: Applications/Databases @@ -10,7 +10,6 @@ Vendor: Percona LLC URL: https://percona.com Source: pmm2-client-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root -Summary: PMM-agent BuildRequires: systemd BuildRequires: pkgconfig(systemd) diff --git a/build/packages/rpm/server/SPECS/alertmanager.spec b/build/packages/rpm/server/SPECS/alertmanager.spec index 3033e09ca9..b1b0747c88 100644 --- a/build/packages/rpm/server/SPECS/alertmanager.spec +++ b/build/packages/rpm/server/SPECS/alertmanager.spec @@ -16,9 +16,6 @@ Source0: https://%{provider}/archive/%{commit}/%{repo}-%{shortcommit}.tar %description %{summary} -%description -%{summary} - %prep %setup -q -n %{repo}-%{commit} mkdir -p ./build/src/github.com/prometheus diff --git a/build/packages/rpm/server/SPECS/dbaas-tools.spec b/build/packages/rpm/server/SPECS/dbaas-tools.spec index 1efb7acf76..8295a45ee6 100644 --- a/build/packages/rpm/server/SPECS/dbaas-tools.spec +++ b/build/packages/rpm/server/SPECS/dbaas-tools.spec @@ -1,4 +1,5 @@ %undefine _missing_build_ids_terminate_build +%define debug_package %{nil} %global commit_aws 2a9ee95fecab59fab41a0b646a63227d66113434 %global shortcommit_aws %(c=%{commit_aws}; echo ${c:0:7}) @@ -8,9 +9,10 @@ %global version_k8s v1.23.7 %global install_golang 1 +%global debug_package %{nil} %define build_timestamp %(date -u +"%y%m%d%H%M") -%define release 1 +%define release 2 %define rpm_release 
%{release}.%{build_timestamp}%{?dist} Name: dbaas-tools @@ -29,9 +31,6 @@ BuildRequires: which %description %{summary} -%description -%{summary} - %prep %setup -T -c -n aws-iam-authenticator-%{commit_aws} %setup -q -c -a 0 -n aws-iam-authenticator-%{commit_aws} @@ -50,7 +49,7 @@ export CGO_ENABLED=0 export USER=builder cd src/github.com/kubernetes-sigs/aws-iam-authenticator-%{commit_aws} -sed -i '/dockers:/,+23d' .goreleaser.yaml +sed -i '/- darwin/d;/- windows/d;/- arm64/d;/dockers:/,+23d' .goreleaser.yaml make goreleaser cd %{_builddir}/kubernetes-%{commit_k8s}/ @@ -72,6 +71,9 @@ install -D -p -m 0775 _output/local/go/bin/kubectl %{buildroot}/opt/dbaas-tools/ /opt/dbaas-tools/bin/kubectl-1.23 %changelog +* Mon Nov 21 2022 Alex Tymchuk - 0.5.7-2 +- Fix the double description warning + * Wed May 04 2022 Nurlan Moldomurov - 0.5.7-1 - Update versions of dbaas-tools diff --git a/build/packages/rpm/server/SPECS/grafana-db-migrator.spec b/build/packages/rpm/server/SPECS/grafana-db-migrator.spec index 303924aba5..70de6bd075 100644 --- a/build/packages/rpm/server/SPECS/grafana-db-migrator.spec +++ b/build/packages/rpm/server/SPECS/grafana-db-migrator.spec @@ -46,5 +46,4 @@ install -m 755 dist/grafana-db-migrator %{buildroot}%{_sbindir}/ - Add fixes for CHAR fields * Tue Nov 02 2021 Nikita Beletskii - 1.0.1-1 -- Creating package for grafana-db-migrator - +- Creating package for grafana-db-migrator \ No newline at end of file diff --git a/build/packages/rpm/server/SPECS/grafana.spec b/build/packages/rpm/server/SPECS/grafana.spec index 6475c4d1b8..c69d5051d4 100644 --- a/build/packages/rpm/server/SPECS/grafana.spec +++ b/build/packages/rpm/server/SPECS/grafana.spec @@ -1,9 +1,9 @@ %global debug_package %{nil} -%global commit 33423d34f211ce1ce5ce0a265a38f0709ec44360 +%global commit f84a7c35000e11a2f4684852fd657f814381558c %global shortcommit %(c=%{commit}; echo ${c:0:7}) %define build_timestamp %(date -u +"%y%m%d%H%M") -%define release 98 -%define grafana_version 9.2.13 
+%define release 99 +%define grafana_version 9.2.18 %define full_pmm_version 2.0.0 %define full_version v%{grafana_version}-%{full_pmm_version} %define rpm_release %{release}.%{build_timestamp}.%{shortcommit}%{?dist} @@ -21,7 +21,10 @@ URL: https://github.com/percona-platform/grafana Source0: https://github.com/percona-platform/grafana/archive/%{commit}.tar.gz ExclusiveArch: %{ix86} x86_64 %{arm} -BuildRequires: nodejs-grunt-cli fontconfig +BuildRequires: fontconfig +%if 0%{?rhel} < 9 +BuildRequires: nodejs-grunt-cli +%endif %description Grafana is an open source, feature rich metrics dashboard and graph editor for @@ -31,6 +34,9 @@ Graphite, InfluxDB & OpenTSDB. %setup -q -n grafana-%{commit} rm -rf Godeps sed -i "s/unknown-dev/%{grafana_version}/" pkg/build/git.go +%if 0%{?rhel} >= 9 + sudo npm install -g grunt-cli +%endif %build mkdir -p _build/src @@ -56,13 +62,12 @@ cp bin/linux-amd64/grafana-cli %{buildroot}%{_bindir}/ install -d -p %{buildroot}%{_sysconfdir}/grafana cp conf/sample.ini %{buildroot}%{_sysconfdir}/grafana/grafana.ini mv conf/ldap.toml %{buildroot}%{_sysconfdir}/grafana/ - install -d -p %{buildroot}%{_sharedstatedir}/grafana %files %defattr(-, grafana, grafana, -) %{_datadir}/grafana -%doc *.md +%doc CHANGELOG.md README.md %license LICENSE %attr(0755, root, root) %{_sbindir}/grafana %attr(0755, root, root) %{_sbindir}/grafana-server @@ -75,10 +80,13 @@ install -d -p %{buildroot}%{_sharedstatedir}/grafana getent group grafana >/dev/null || groupadd -r grafana getent passwd grafana >/dev/null || \ useradd -r -g grafana -d /etc/grafana -s /sbin/nologin \ - -c "Grafana Dashboard" grafana + -c "Grafana Server" grafana exit 0 %changelog +* Thu May 18 2023 Matej Kubinec - 9.2.18-1 +- PMM-12114 Grafana 9.2.18 + * Fri Mar 10 2023 Matej Kubinec - 9.2.13-1 - PMM-11762 Grafana 9.2.13 diff --git a/build/packages/rpm/server/SPECS/pmm-update.spec b/build/packages/rpm/server/SPECS/pmm-update.spec index ccaacc2550..779d194b36 100644 --- 
a/build/packages/rpm/server/SPECS/pmm-update.spec +++ b/build/packages/rpm/server/SPECS/pmm-update.spec @@ -1,6 +1,6 @@ %undefine _missing_build_ids_terminate_build -# we need to remove it as soon as we remove all noarch pmm-update rpms +# TODO: remove it as soon as we remove all noarch pmm-update rpms # from 'pmm2-components/yum/laboratory' %define _binaries_in_noarch_packages_terminate_build 0 %define _unpackaged_files_terminate_build 0 diff --git a/build/packages/rpm/server/SPECS/victoriametrics.spec b/build/packages/rpm/server/SPECS/victoriametrics.spec index 2288f8f002..52e849a091 100644 --- a/build/packages/rpm/server/SPECS/victoriametrics.spec +++ b/build/packages/rpm/server/SPECS/victoriametrics.spec @@ -56,13 +56,13 @@ install -D -p -m 0755 ./bin/vmalert-pure %{buildroot}%{_sbindir}/vmalert * Thu Oct 20 2022 Michal Kralik - 1.82.1 - upgrade victoriametrics to 1.82.1 release -* Thu May 11 2022 Michael Okoko - 1.77.1 +* Wed May 11 2022 Michael Okoko - 1.77.1 - upgrade victoriametrics to 1.77.1 release * Thu Apr 14 2022 Anton Bystrov - 1.76.1 - upgrade victoriametrics to 1.76.1 release -* Wed Jan 20 2022 Anton Bystrov - 1.72.0-1 +* Thu Jan 20 2022 Anton Bystrov - 1.72.0-1 - upgrade victoriametrics to 1.72.0 release * Thu Jun 3 2021 Vadim Yalovets - 1.60.0-1 @@ -80,16 +80,16 @@ install -D -p -m 0755 ./bin/vmalert-pure %{buildroot}%{_sbindir}/vmalert * Tue Dec 15 2020 Nurlan Moldomurov - 1.50.1-1 - upgrade victoriametrics to 1.50.1 release -* Tue Nov 26 2020 Nikolay Khramchikhin - 1.48.0-1 +* Thu Nov 26 2020 Nikolay Khramchikhin - 1.48.0-1 - upgrade victoriametrics to 1.48.0 release -* Tue Nov 19 2020 Nikolay Khramchikhin - 1.47.0-1 +* Thu Nov 19 2020 Nikolay Khramchikhin - 1.47.0-1 - upgrade victoriametrics to 1.47.0 release * Tue Nov 10 2020 Nikolay Khramchikhin - 1.46.0-1 - PMM-6401 upgrade victoriametrics for reading Prometheus data files -* Tue Oct 28 2020 Nikolay Khramchikhin - 1.45.0-1 +* Wed Oct 28 2020 Nikolay Khramchikhin - 1.45.0-1 - PMM-6401 upgrade 
victoriametrics for reading Prometheus data files * Tue Oct 13 2020 Aliaksandr Valialkin - 1.44.0-1 diff --git a/build/packer/pmm2.el9.json b/build/packer/pmm2.el9.json new file mode 100644 index 0000000000..016a5f1cae --- /dev/null +++ b/build/packer/pmm2.el9.json @@ -0,0 +1,198 @@ +{ + "variables": { + "single_disk": "false", + "pmm2_server_repo": "testing", + "pmm_client_repos": "original testing", + "pmm_client_repo_name": "percona-testing-x86_64" + }, + "builders": [ + { + "type": "amazon-ebs", + "ami_name": "PMM2 Server [{{isotime \"2006-01-02 1504\"}}]", + "instance_type": "c4.xlarge", + "launch_block_device_mappings": [ + { + "delete_on_termination": true, + "device_name": "/dev/sda1", + "volume_size": 10, + "volume_type": "gp2" + }, + { + "delete_on_termination": false, + "device_name": "/dev/sdb", + "volume_size": 100, + "volume_type": "gp2" + } + ], + "region": "us-east-1", + "security_group_id": "sg-688c2b1c", + "source_ami": "ami-0845395779540e3cb", + "ssh_pty": "true", + "ena_support": "true", + "ssh_username": "ec2-user", + "subnet_id": "subnet-ee06e8e1", + "run_tags": { + "iit-billing-tag": "pmm-ami" + }, + "run_volume_tags": { + "iit-billing-tag": "pmm-ami" + }, + "ssh_clear_authorized_keys": "true" + }, + { + "type": "azure-arm", + + "subscription_id": "41000701-4126-4674-9219-da03b1f9bb58", + "resource_group_name": "percona", + "storage_account": "percona", + "capture_container_name": "percona", + "capture_name_prefix": "pmm2-server-{{ user `pmm_version` }}", + + "os_type": "Linux", + "image_publisher": "OpenLogic", + "image_offer": "CentOS", + "image_sku": "7_9", + "image_version": "7.9.2021071900", + + "azure_tags": { + "dept": "Engineering", + "task": "PMM2 Image deployment" + }, + + "location": "East US", + "vm_size": "Standard_B2s", + "disk_additional_size": [30], + "ssh_clear_authorized_keys": "true" + }, + { + "type": "virtualbox-ovf", + "vm_name": "PMM2-Server-{{isotime \"2006-01-02-1504\"}}", + "export_opts": [ + "--ovf10", + 
"--manifest", + "--vsys", "0", + "--product", "Percona Monitoring and Management", + "--producturl", "https://www.percona.com/software/database-tools/percona-monitoring-and-management", + "--vendor", "Percona", + "--vendorurl", "https://www.percona.com", + "--version", "{{isotime \"2006-01-02\"}}", + "--description", "Percona Monitoring and Management (PMM) is an open-source platform for managing and monitoring MySQL, PostgreSQL and MongoDB" + ], + "format": "ovf", + "guest_additions_mode": "disable", + "headless": true, + "output_directory": "pmm2-virtualbox-ovf", + "shutdown_command": "rm -rf ~/.ssh/authorized_keys; cat /dev/zero > zero.fill; sync; sleep 1; sync; rm -f zero.fill; sudo shutdown -P now", + "source_path": ".cache/box/box.ovf", + "ssh_private_key_file": ".cache/id_rsa_vagrant", + "ssh_wait_timeout": "8m", + "ssh_pty": true, + "ssh_username": "vagrant", + "vboxmanage": [ + ["modifyvm", "{{.Name}}", "--memory", "4096"], + ["modifyvm", "{{.Name}}", "--audio", "none"], + ["modifyvm", "{{.Name}}", "--cpus", "4", "--vrdeport", "5050", "--vram", "10", "--graphicscontroller", "vmsvga", "--ioapic", "on"], + ["storagectl", "{{.Name}}", "--name", "IDE Controller", "--controller", "PIIX4", "--remove"], + ["storagectl", "{{.Name}}", "--name", "SATA Controller", "--controller", "IntelAhci", "--portcount", "2", "--hostiocache", "on", "--bootable", "on"], + ["createhd", "--variant", "STREAM", "--format", "VMDK", "--size", "409600", "--filename", "/tmp/box-disk002.vmdk"], + ["storageattach", "{{.Name}}", "--storagectl", "SATA Controller", "--port", "1", "--type", "hdd", "--medium", "/tmp/box-disk002.vmdk"] + ] + }, + { + "type": "digitalocean", + "ssh_username": "root", + "image": "centos-stream-9-x64", + "region": "nyc3", + "size": "s-2vcpu-2gb", + "snapshot_name": "PMM2 Server [{{isotime \"2006-01-02 1504\"}}]", + "ssh_clear_authorized_keys": "true" + } + ], + "provisioners": [ + { + "type": "shell", + "inline": [ + "sudo yum -y update", + "sudo yum -y install 
epel-release", + "sudo yum -y install ansible" + ] + }, + { + "type": "ansible-local", + "playbook_file": "ansible/pmm2/main.yml", + "extra_arguments": [ + "-v", + "-u root", + "--extra-vars", + "\"pmm2_server_repo='{{ user `pmm2_server_repo`}}' pmm_client_repo_name='{{ user `pmm_client_repo_name`}}' pmm_client_repos='{{ user `pmm_client_repos`}}'\"" + ], + "role_paths": [ + "ansible/roles/cloud-node", + "ansible/roles/lvm-init", + "ansible/roles/pmm2-images", + "ansible/roles/supervisord-init", + "ansible/roles/ami-ovf" + ] + }, + { + "type": "ansible-local", + "playbook_dir": "update/tasks", + "playbook_file": "update/tasks/update.yml", + "extra_arguments": ["-vvv", "-u root"], + "role_paths": [ + "update/tasks/roles/clickhouse", + "update/tasks/roles/dashboards_upgrade", + "update/tasks/roles/grafana", + "update/tasks/roles/initialization", + "update/tasks/roles/nginx", + "update/tasks/roles/postgres" + ] + }, + { + "type": "ansible-local", + "playbook_file": "ansible/pmm2/post-build-actions.yml", + "extra_arguments": ["-v"] + }, + { + "type": "file", + "only": ["digitalocean"], + "destination": "/home/admin/digitalocean_add_dbaas", + "source": "files/digitalocean/digitalocean_add_dbaas.py" + }, + { + "type": "shell", + "only": ["digitalocean"], + "inline": [ + "curl -s https://raw.githubusercontent.com/digitalocean/marketplace-partners/master/scripts/99-img-check.sh -o img_check.sh", + "sudo bash ./img_check.sh" + ] + } + ], + "post-processors": [ + [ + { + "type": "shell-local", + "only": ["virtualbox-ovf"], + "inline_shebang": "/bin/bash", + "inline": [ + "set -o errexit", + "set -o xtrace", + "pushd pmm2-virtualbox-ovf", + " NAME=$(ls *.ovf | sed -e 's/.ovf//')", + " sha256sum *.ovf *.vmdk | sed -E 's/^([^ ]+) ([^ ]+)$/SHA256(\\2)= \\1/' > ${NAME}.mf", + " tar -cpf ${NAME}.ova *.ovf *.mf *-disk00*.vmdk", + " rm -rf *.ovf *.mf *-disk00*.vmdk", + "popd" + ] + } + ], + [ + { + "type": "manifest", + "only": ["amazon-ebs"], + "output": "manifest.json", + 
"strip_path": true + } + ] + ] +} diff --git a/build/packer/pmm2.json b/build/packer/pmm2.json index 1859c4fdef..74aa89ae6c 100644 --- a/build/packer/pmm2.json +++ b/build/packer/pmm2.json @@ -74,7 +74,7 @@ "--vendor", "Percona", "--vendorurl", "https://www.percona.com", "--version", "{{isotime \"2006-01-02\"}}", - "--description", "Percona Monitoring and Management (PMM) is an open-source platform for managing and monitoring MySQL and MongoDB performance" + "--description", "Percona Monitoring and Management (PMM) is an open-source platform for managing and monitoring MySQL, PostgreSQL and MongoDB" ], "format": "ovf", "guest_additions_mode": "disable", diff --git a/build/scripts/build-client-docker b/build/scripts/build-client-docker index cbe9db0a65..deab87e0d1 100755 --- a/build/scripts/build-client-docker +++ b/build/scripts/build-client-docker @@ -10,7 +10,8 @@ if [ -f "${docker_client_tarball}" ]; then exit 0 fi -cp ${root_dir}/results/tarball/pmm2-client-*.tar.gz ${root_dir}/tmp/source/pmm/build/docker/client/pmm2-client.tar.gz +DOCKER_FILE_LOCATION=tmp/source/pmm/build/docker/client # relative to `root_dir` +cp ${root_dir}/results/tarball/pmm2-client-*.tar.gz ${root_dir}/${DOCKER_FILE_LOCATION}/pmm2-client.tar.gz if [ -z "${DOCKER_CLIENT_TAG}" ]; then DOCKER_CLIENT_TAG=perconalab/pmm-client-fb:${full_pmm_version} @@ -21,7 +22,11 @@ CLIENT_IMAGE_VERSION=`echo $DOCKER_CLIENT_TAG | cut -d ':' -f2` docker build --build-arg BUILD_DATE="`date --rfc-3339=seconds`" \ --build-arg VERSION="$CLIENT_IMAGE_VERSION" \ - --squash --no-cache -t ${DOCKER_CLIENT_TAG} tmp/source/pmm/build/docker/client + --squash \ + --no-cache \ + -f ${DOCKER_FILE_LOCATION}/${docker_file} \ + -t ${DOCKER_CLIENT_TAG} \ + ${DOCKER_FILE_LOCATION} if [ -n "${PUSH_DOCKER}" ]; then mkdir -p $(dirname ${docker_client_tag_file}) diff --git a/build/scripts/build-client-packages b/build/scripts/build-client-packages index ea7fdc89b1..a620637051 100755 --- a/build/scripts/build-client-packages +++ 
b/build/scripts/build-client-packages @@ -22,7 +22,7 @@ Usage: $0 [OPTIONS] --help) usage ;; Example $0 --builddir=/tmp/PMM_CLIENT --get_sources=1 --build_src_rpm=1 --build_rpm=1 EOF - exit 1 + exit 1 } append_arg_to_args () { @@ -31,8 +31,7 @@ append_arg_to_args () { parse_arguments() { pick_args= - if test "$1" = PICK-ARGS-FROM-ARGV - then + if test "$1" = PICK-ARGS-FROM-ARGV; then pick_args=1 shift fi @@ -52,8 +51,7 @@ parse_arguments() { --install_deps=*) INSTALL="$val" ;; --help) usage ;; *) - if test -n "$pick_args" - then + if test -n "$pick_args"; then append_arg_to_args "$arg" fi ;; @@ -70,6 +68,7 @@ get_branches() { git reset --hard > /dev/null 2>&1 git clean -xdf > /dev/null 2>&1 git checkout $SUBMODULE_BRANCH > /dev/null 2>&1 + # this is the return value of this function git submodule status | grep $COMPONENT | awk '{print $1}' | awk -F'-' '{print $2}' cd - >/dev/null } @@ -88,13 +87,11 @@ get_repos() { } check_workdir(){ - if [ "x$WORKDIR" = "x$CURDIR" ] - then + if [ "x$WORKDIR" = "x$CURDIR" ]; then echo >&2 "Current directory cannot be used for building!" exit 1 else - if ! test -d "$WORKDIR" - then + if ! test -d "$WORKDIR"; then echo >&2 "$WORKDIR is not a directory." exit 1 fi @@ -104,21 +101,18 @@ check_workdir(){ get_sources(){ cd $WORKDIR - if [ $SOURCE = 0 ] - then + if [ $SOURCE = 0 ]; then echo "Sources will not be downloaded" return 0 fi git clone $REPO retval=$? - if [ $retval != 0 ] - then + if [ $retval != 0 ]; then echo "There were some issues during repo cloning from github. Please retry one more time" exit 1 fi cd pmm-client - if [ ! -z $BRANCH ] - then + if [ ! -z $BRANCH ]; then git reset --hard git clean -xdf git checkout $BRANCH @@ -185,18 +179,15 @@ get_system(){ } install_deps() { - if [ $INSTALL = 0 ] - then + if [ $INSTALL = 0 ]; then echo "Dependencies will not be installed" return; fi - if [ ! $( id -u ) -eq 0 ] - then + if [ ! $( id -u ) -eq 0 ]; then echo "It is not possible to instal dependencies. 
Please run as root" exit 1 fi - if [ "x$OS" = "xrpm" ] - then + if [ "$OS" = "rpm" ]; then yum -y install git wget rpmdevtools bison yum-utils rpm-build else apt-get update @@ -208,11 +199,9 @@ install_deps() { get_tar(){ TARBALL=$1 TARFILE=$(basename $(find $WORKDIR/$TARBALL -name 'pmm2-client*.tar.gz' | sort | tail -n1) || :) - if [ -z $TARFILE ] - then + if [ -z $TARFILE ]; then TARFILE=$(basename $(find $CURDIR/$TARBALL -name 'pmm2-client*.tar.gz' | sort | tail -n1) || :) - if [ -z $TARFILE ] - then + if [ -z $TARFILE ]; then echo "There is no $TARBALL for build" exit 1 else @@ -228,11 +217,9 @@ get_deb_sources(){ param=$1 echo $param FILE=$(basename $(find $WORKDIR/source_deb -name "pmm2-client*.$param" | sort | tail -n1) || :) - if [ -z $FILE ] - then + if [ -z $FILE ]; then FILE=$(basename $(find $CURDIR/source_deb -name "pmm2-client*.$param" | sort | tail -n1) || :) - if [ -z $FILE ] - then + if [ -z $FILE ]; then echo "There is no sources for build" exit 1 else @@ -245,31 +232,31 @@ get_deb_sources(){ } build_srpm(){ - if [ $SRPM = 0 ] - then + if [ $SRPM = 0 ]; then echo "SRC RPM will not be created" return; fi - if [ "x$OS" = "xdeb" ] - then + if [ "$OS" = "deb" ]; then echo "It is not possible to build src rpm here" exit 1 fi cd $WORKDIR get_tar "tarball" + # rm -fr rpmbuild ls | grep -v tar.gz | xargs rm -rf + # TARFILE=$(basename $(find . 
-name 'pmm2-client-*.tar.gz' | sort | tail -n1)) NAME=$(echo ${TARFILE}| awk -F '-' '{print $1"-"$2}') VERSION_TMP=$(echo ${TARFILE}| awk -F '-' '{print $3}') VERSION=${VERSION_TMP%.tar.gz} + # - mkdir -vp rpmbuild/{SOURCES,SPECS,BUILD,SRPMS,RPMS} + # - tar -C ${WORKDIR} -zxpf ${TARFILE} ${NAME}-${VERSION}/rpm # cd ${WORKDIR}/rpmbuild/SPECS @@ -281,27 +268,22 @@ build_srpm(){ mkdir -p ${CURDIR}/srpm cp rpmbuild/SRPMS/*.src.rpm ${CURDIR}/srpm cp rpmbuild/SRPMS/*.src.rpm ${WORKDIR}/srpm - # - } build_rpm(){ - if [ $RPM = 0 ] - then + if [ $RPM = 0 ]; then echo "RPM will not be created" return; fi - if [ "x$OS" = "xdeb" ] - then + if [ "$OS" = "deb" ]; then echo "It is not possible to build rpm here" exit 1 fi + SRC_RPM=$(basename $(find $WORKDIR/srpm -name 'pmm2-client*.src.rpm' | sort | tail -n1) || :) - if [ -z $SRC_RPM ] - then + if [ -z $SRC_RPM ]; then SRC_RPM=$(basename $(find $CURDIR/srpm -name 'pmm2-client*.src.rpm' | sort | tail -n1) || :) - if [ -z $SRC_RPM ] - then + if [ -z $SRC_RPM ]; then echo "There is no src rpm for build" echo "You can create it using key --build_src_rpm=1" exit 1 @@ -311,11 +293,16 @@ build_rpm(){ else cp $WORKDIR/srpm/$SRC_RPM $WORKDIR fi + cd $WORKDIR rm -fr rpmbuild mkdir -vp rpmbuild/{SOURCES,SPECS,BUILD,SRPMS,RPMS} cp $SRC_RPM rpmbuild/SRPMS/ - rpmbuild --define "_topdir ${WORKDIR}/rpmbuild" --define "version $pmm_version" --define "release $RPM_RELEASE" --define "dist .$OS_NAME" --rebuild rpmbuild/SRPMS/$SRC_RPM + rpmbuild --define "_topdir ${WORKDIR}/rpmbuild" \ + --define "version $pmm_version" \ + --define "release $RPM_RELEASE" \ + --define "dist .$OS_NAME" \ + --rebuild rpmbuild/SRPMS/$SRC_RPM return_code=$? 
if [ $return_code != 0 ]; then @@ -325,17 +312,14 @@ build_rpm(){ mkdir -p ${CURDIR}/rpm cp rpmbuild/RPMS/*/*.rpm ${WORKDIR}/rpm cp rpmbuild/RPMS/*/*.rpm ${CURDIR}/rpm - } build_source_deb(){ - if [ $SDEB = 0 ] - then + if [ $SDEB = 0 ]; then echo "source deb package will not be created" return; fi - if [ "x$OS" = "xrmp" ] - then + if [ "$OS" = "rpm" ]; then echo "It is not possible to build source deb here" exit 1 fi @@ -410,20 +394,18 @@ build_source_deb(){ } build_deb(){ - if [ $DEB = 0 ] - then + if [ $DEB = 0 ]; then echo "source deb package will not be created" return; fi - if [ "x$OS" = "xrmp" ] - then + if [ "$OS" = "rpm" ]; then echo "It is not possible to build source deb here" exit 1 fi - for file in 'dsc' 'orig.tar.gz' 'changes' 'diff.gz' - do + for file in 'dsc' 'orig.tar.gz' 'changes' 'diff.gz'; do get_deb_sources $file done + cd $WORKDIR rm -fv *.deb export DEBIAN_VERSION="$(lsb_release -sc)" @@ -478,7 +460,7 @@ install_deps BRANCH="PMM-2.0" REPO="https://github.com/Percona-Lab/pmm-submodules.git" -if [ x"$TARBALL" = x'1' -o x"$SOURCE" = x'1' ]; then +if [ "$TARBALL" = '1' -o "$SOURCE" = '1' ]; then MongoExp_BRANCH_NAME=$(get_branches "mongodb_exporter") MongoExp_REPO=$(get_repos "mongodb_exporter") TOOLKIT_BRANCH_NAME=$(get_branches "percona-toolkit") diff --git a/build/scripts/build-client-srpm b/build/scripts/build-client-srpm index eeec659084..aff1082492 100755 --- a/build/scripts/build-client-srpm +++ b/build/scripts/build-client-srpm @@ -15,7 +15,10 @@ main() { set -o xtrace export pmm_release=$pmm_release - export sudo_path=\$(ls /usr/bin/sudo) + export sudo_path= + if [ -e /usr/bin/sudo ]; then + export sudo_path=\$(ls /usr/bin/sudo) + fi [[ ${IMAGE} = ${rpmbuild_docker_image} ]] || \$sudo_path yum -y install git rpm-build mkdir -p /tmp/pmm pushd /home/builder/results diff --git a/build/scripts/build-rpmbuild-docker b/build/scripts/build-rpmbuild-docker index c850782f4e..77706f3621 100755 --- a/build/scripts/build-rpmbuild-docker +++ 
b/build/scripts/build-rpmbuild-docker @@ -6,6 +6,6 @@ set -o xtrace . $(dirname $0)/vars root_dir=$(cd $(dirname $0)/../..; pwd -P) -docker build --squash -t rpmbuild:local ${root_dir}/build/docker/rpmbuild +docker build --squash -f ${docker_file} -t rpmbuild:local ${root_dir}/build/docker/rpmbuild/ # vim: expandtab shiftwidth=4 tabstop=4 diff --git a/build/scripts/build-server b/build/scripts/build-server index bf19f70695..7a2b053be9 100755 --- a/build/scripts/build-server +++ b/build/scripts/build-server @@ -16,9 +16,6 @@ cp ${root_dir}/results/rpm/*.rpm ${rpms_dir}/ # install all rpms ${bin_dir}/build-server-rpm-all -# 3rd-party -${bin_dir}/build-server-rpm grafana-db-migrator - # build pmm-server ${bin_dir}/build-server-docker diff --git a/build/scripts/build-server-docker b/build/scripts/build-server-docker index c5d8b9adf9..8b3b379c8e 100755 --- a/build/scripts/build-server-docker +++ b/build/scripts/build-server-docker @@ -10,17 +10,22 @@ if [ -f "${docker_tarball}" ]; then exit 0 fi -cp -r ${root_dir}/tmp/source/pmm/build/ansible ${rpms_dir}/../ansible +docker_root=$(realpath ${rpms_dir}/..) 
+cp -r ${root_dir}/tmp/source/pmm/build/ansible ${docker_root}/ansible +cp ${root_dir}/tmp/source/pmm/build/docker/server/* ${docker_root}/ # enable 'local' repo in ansible playbooks -sed -i -e "s/enablerepo: \"pmm2-server\"/enablerepo: local/" ${rpms_dir}/../ansible/roles/pmm2-images/tasks/main.yml -sed -i -e "s/pmm_client_repo_name: \".*\"/pmm_client_repo_name: local/" ${rpms_dir}/../ansible/roles/pmm2-images/vars/main.yml +sed -i -e "s/enablerepo: \"pmm2-server\"/enablerepo: local/" ${docker_root}/ansible/roles/pmm2-images/tasks/main.yml +sed -i -e "s/pmm_client_repo_name: \".*\"/pmm_client_repo_name: local/" ${docker_root}/ansible/roles/pmm2-images/vars/main.yml -cp ${root_dir}/tmp/source/pmm/build/docker/server/* ${rpms_dir}/../ -git -C ${root_dir} rev-parse HEAD > ${rpms_dir}/../gitCommit +git -C ${root_dir} rev-parse HEAD > ${docker_root}/gitCommit +ls -la ${rpms_dir} docker run --rm -v ${rpms_dir}:/home/builder/rpm/RPMS ${rpmbuild_docker_image} sh -c " sudo chown -R builder /home/builder/rpm/RPMS - /usr/bin/createrepo_c --update /home/builder/rpm/RPMS + until /usr/bin/createrepo_c --update /home/builder/rpm/RPMS; do + echo "waiting" + sleep 1 + done " if [ -z "${DOCKER_TAG}" ]; then @@ -29,12 +34,13 @@ fi IMAGE_VERSION=`echo $DOCKER_TAG | cut -d ':' -f2` - docker build --build-arg BUILD_DATE="`date --rfc-3339=seconds`" \ --build-arg VERSION="$IMAGE_VERSION" \ --squash \ --no-cache \ - -t ${DOCKER_TAG} ${rpms_dir}/../ + -f ${docker_root}/${docker_file} \ + -t ${DOCKER_TAG} \ + ${docker_root}/ if [ -n "${PUSH_DOCKER}" ]; then mkdir -p $(dirname ${docker_tag_file}) @@ -43,7 +49,7 @@ if [ -n "${PUSH_DOCKER}" ]; then fi if [ -n "${SAVE_DOCKER}" ]; then mkdir -p $(dirname ${docker_tarball}) - docker save ${DOCKER_TAG} | xz > ${docker_tarball} + docker save ${DOCKER_TAG} > ${docker_tarball} fi # vim: expandtab shiftwidth=4 tabstop=4 diff --git a/build/scripts/build-server-rpm b/build/scripts/build-server-rpm index 0e4a213e10..09ddbb83c1 100755 --- 
a/build/scripts/build-server-rpm +++ b/build/scripts/build-server-rpm @@ -11,7 +11,7 @@ get_rpm_version() { local rpm_version=$( docker run --rm -v ${rpmbuild_dir}/SOURCES:/home/builder/rpm/SOURCES ${rpmbuild_docker_image} sh -c " rpmspec -q --qf '%{version}-%{release}\n' SOURCES/${spec_name}.spec \ - | sed -re 's/\.[0-9]{10}\././; s/.el7//' \ + | sed -re 's/\.[0-9]{10}\././; s/.${rpmbuild_dist}//' \ | head -1 " ) @@ -23,24 +23,32 @@ get_rpm_version() { is_build_needed() { local spec_name=$1 local rpm_version=$2 + local packages= + # Structure of S3 build cache + # s3://pmm-build-cache/PR-BUILDS/7 - el7 + # s3://pmm-build-cache/PR-BUILDS/9 - el9 + # s3://pmm-build-cache/RELEASE/7 - el7 + # s3://pmm-build-cache/RELEASE/9 - el9 + + # This is for el9 if [ -n "$RPM_EPOCH" ]; then aws s3 sync \ --region us-east-2 \ --no-sign-request \ - s3://pmm-build-cache/PR-BUILDS/${spec_name}-${rpm_version} \ + s3://pmm-build-cache/PR-BUILDS/${rpmbuild_dist}/${spec_name}-${rpm_version} \ ${rpms_dir}/${spec_name}-${rpm_version} else aws s3 sync \ --region us-east-2 \ --no-sign-request \ - s3://pmm-build-cache/${spec_name}-${rpm_version} \ + s3://pmm-build-cache/RELEASE/${rpmbuild_dist}/${spec_name}-${rpm_version} \ ${rpms_dir}/${spec_name}-${rpm_version} fi - local packages=$(find ${rpms_dir}/${spec_name}-${rpm_version} -name "*.rpm" | wc -l) + packages=$(find ${rpms_dir}/${spec_name}-${rpm_version} -name "*.${rpmbuild_dist}.*.rpm" | wc -l) - # return result as true or flase + # return result as true or false [[ ${packages// /} == 0 ]] } @@ -91,8 +99,9 @@ build() { local CH_VERSION=${rpm_version%-*} local CH_TAG=${rpm_version#*-} - if is_build_needed "${spec_name}" "${rpm_version}" || [[ -n "${FORCE_REBUILD}" ]]; then + if [[ -n "${FORCE_REBUILD}" ]] || is_build_needed "${spec_name}" "${rpm_version}"; then echo "RPM Build Dir: ${rpmbuild_dir}, Docker image: ${rpmbuild_docker_image}" + docker run --rm -v ${rpmbuild_dir}/SOURCES:/home/builder/rpm/SOURCES -v 
${rpms_dir}:/home/builder/rpm/RPMS ${rpmbuild_docker_image} sh -c " set -o errexit set -o xtrace @@ -112,35 +121,37 @@ build() { printf '[local]\nname=local\nbaseurl=file:///home/builder/rpm/RPMS\ngpgcheck=0\nenabled=1\n' \ | sudo tee /etc/yum.repos.d/local.repo - /usr/bin/createrepo_c --update /home/builder/rpm/RPMS + until /usr/bin/createrepo_c --update /home/builder/rpm/RPMS; do + echo waiting + sleep 1 + done - sudo yum-builddep -y SOURCES/${spec_name}.spec + sudo yum-builddep --randomwait=5 -y SOURCES/${spec_name}.spec spectool -C SOURCES -g SOURCES/${spec_name}.spec - rpmbuild --define '_rpmdir %{_topdir}/RPMS/${spec_name}-${rpm_version}' --define 'dist .el7' -ba SOURCES/${spec_name}.spec + rpmbuild --define '_rpmdir %{_topdir}/RPMS/${spec_name}-${rpm_version}' --define 'dist .${rpmbuild_dist}' -ba SOURCES/${spec_name}.spec rm -f SOURCES/${spec_name}.spec* sudo chown -R $(id -u):$(id -g) /home/builder/rpm/RPMS /home/builder/rpm/SOURCES " - if [ -n "$RPM_EPOCH" ]; then aws s3 sync \ --region us-east-2 \ ${rpms_dir}/${spec_name}-${rpm_version} \ - s3://pmm-build-cache/PR-BUILDS/${spec_name}-${rpm_version} \ + s3://pmm-build-cache/PR-BUILDS/${rpmbuild_dist}/${spec_name}-${rpm_version} \ || : else aws s3 sync \ --region us-east-2 \ ${rpms_dir}/${spec_name}-${rpm_version} \ - s3://pmm-build-cache/${spec_name}-${rpm_version} \ + s3://pmm-build-cache/RELEASE/${rpmbuild_dist}/${spec_name}-${rpm_version} \ || : fi fi } build "$1" "$2" -echo DONE +echo "build-server-rpm finished, spec_name: $1, repo_name: ${2:-$1}" # vim: expandtab shiftwidth=4 tabstop=4 diff --git a/build/scripts/build-server-rpm-all b/build/scripts/build-server-rpm-all index 11e037c02f..d9cb1a59a2 100755 --- a/build/scripts/build-server-rpm-all +++ b/build/scripts/build-server-rpm-all @@ -19,5 +19,6 @@ ${bin_dir}/build-server-rpm vmproxy pmm ${bin_dir}/build-server-rpm victoriametrics ${bin_dir}/build-server-rpm alertmanager ${bin_dir}/build-server-rpm grafana +# ${bin_dir}/build-server-rpm 
grafana-db-migrator # vim: expandtab shiftwidth=4 tabstop=4 diff --git a/build/scripts/build-submodules b/build/scripts/build-submodules new file mode 100755 index 0000000000..34134e794b --- /dev/null +++ b/build/scripts/build-submodules @@ -0,0 +1,42 @@ +#!/bin/bash +set -o errexit + +if [ -s ci.yml ]; then + sudo rm -rf results tmp || : + python3 ci.py + . ./.git-sources + echo $pmm_commit > apiCommitSha + echo $pmm_branch > apiBranch + echo $pmm_url > apiURL + echo $pmm_qa_branch > pmmQABranch + echo $pmm_qa_commit > pmmQACommitSha + echo $pmm_ui_tests_branch > pmmUITestBranch + echo $pmm_ui_tests_commit > pmmUITestsCommitSha +else + sudo rm -rf results tmp || : + git reset --hard + git clean -fdx + git submodule foreach --recursive git reset --hard + git submodule foreach --recursive git clean -fdx + git submodule status + export commit_sha=$(git submodule status | grep 'pmm-managed' | awk -F ' ' '{print $1}') + export api_tests_commit_sha=$(git submodule status | grep 'pmm' | awk -F ' ' '{print $1}') + export api_tests_branch=$(git config -f .gitmodules submodule.pmm.branch) + export api_tests_url=$(git config -f .gitmodules submodule.pmm.url) + echo $api_tests_commit_sha > apiCommitSha + echo $api_tests_branch > apiBranch + echo $api_tests_url > apiURL + cat apiBranch + cat apiURL + export pmm_qa_commit_sha=$(git submodule status | grep 'pmm-qa' | awk -F ' ' '{print $1}') + export pmm_qa_branch=$(git config -f .gitmodules submodule.pmm-qa.branch) + echo $pmm_qa_branch > pmmQABranch + echo $pmm_qa_commit_sha > pmmQACommitSha + export pmm_ui_tests_commit_sha=$(git submodule status | grep 'pmm-ui-tests' | awk -F ' ' '{print $1}') + export pmm_ui_tests_branch=$(git config -f .gitmodules submodule.pmm-ui-tests.branch) + echo $pmm_ui_tests_branch > pmmUITestBranch + echo $pmm_ui_tests_commit_sha > pmmUITestsCommitSha +fi + +export fb_commit_sha=$(git rev-parse HEAD) +echo $fb_commit_sha > fbCommitSha diff --git a/build/scripts/create-tags.py 
b/build/scripts/create-tags.py index bb6cbdf607..a8c3a426eb 100755 --- a/build/scripts/create-tags.py +++ b/build/scripts/create-tags.py @@ -4,9 +4,21 @@ import os, subprocess, time REPOS = [ + "sources/pmm/src/github.com/percona/pmm", + "sources/dbaas-controller/src/github.com/percona-platform/dbaas-controller", + "sources/pmm-dump", + "sources/pmm-qa/src/github.com/percona/pmm-qa", + "sources/pmm-ui-tests/src/github.com/percona/pmm-ui-tests", "sources/grafana/src/github.com/grafana/grafana", "sources/grafana-dashboards", - "sources/pmm/src/github.com/percona/pmm", + "sources/node_exporter/src/github.com/prometheus/node_exporter", + "sources/mysqld_exporter/src/github.com/percona/mysqld_exporter", + "sources/mongodb_exporter/src/github.com/percona/mongodb_exporter", + "sources/postgres_exporter/src/github.com/percona/postgres_exporter", + "sources/clickhouse_exporter/src/github.com/Percona-Lab/clickhouse_exporter", + "sources/proxysql_exporter/src/github.com/percona/proxysql_exporter", + "sources/rds_exporter/src/github.com/percona/rds_exporter", + "sources/azure_metrics_exporter/src/github.com/percona/azure_metrics_exporter", ".", ] diff --git a/build/scripts/vars b/build/scripts/vars index 401f4088e1..bd267aa95e 100644 --- a/build/scripts/vars +++ b/build/scripts/vars @@ -1,5 +1,9 @@ bin_dir=$(cd $(dirname $0); pwd -P) -root_dir=$(cd $(dirname $0)/../../../../../../../..; pwd -P) +# TODO: refactor to pass ${WORKSPACE} as ROOT_DIR from either Jenkins or, if relevant, from GH actions +# NOTE: in most cases, this evaluates to Jenkins ${WORKSPACE} directory when called from pipelines +# Example: /home/ec2-user/workspace/ol9-build-server/, see how it's used in ol9-build-server.groovy +root_dir_tmp=$(cd $(dirname $0)/../../../../../../../..; pwd -P) +root_dir=${ROOT_DIR:-$root_dir_tmp} tmp_dir=${root_dir}/tmp # In VERSION file we can have numeric value like '2.0.0' as well as @@ -7,7 +11,7 @@ tmp_dir=${root_dir}/tmp # e.g. in Version directive in spec files. 
So we define: # * full_pmm_version to contain full version and build metadata: '2.0.0-alpha3-PMM-1234-fb-branch-123abc'; # * pmm_version to contain only MAJOR.MINOR.PATCH: '2.0.0'; -# * pmm_release to contain only pre-release part (may be empty): 'alpha3'. +# * pmm_release to contain only pre-release part (can be empty): 'alpha3'. pmm_branch=$(git rev-parse --abbrev-ref HEAD) pmm_base_version=$(cat ${root_dir}/VERSION) @@ -39,9 +43,11 @@ echo -e "\n\n\n>>> full_pmm_version=${full_pmm_version} pmm_version=${pmm_versio rpmbuild_docker_image=${RPMBUILD_DOCKER_IMAGE:-public.ecr.aws/e7j3v3n0/rpmbuild:2} rpms_dir=${root_dir}/tmp/pmm-server/RPMS rpmbuild_dir=${root_dir}/sources/pmm/src/github.com/percona/pmm/build/packages/rpm/server +rpmbuild_dist=${RPMBUILD_DIST:-"el7"} source_dir=${root_dir}/tmp/source/pmm2-client-${pmm_version} binary_dir=${root_dir}/tmp/binary/pmm2-client-${pmm_version} client_properties=${root_dir}/results/pmm-client.properties +docker_file=${DOCKERFILE:-Dockerfile} docker_tag_file=${root_dir}/results/docker/TAG docker_client_tag_file=${root_dir}/results/docker/CLIENT_TAG docker_tarball=${root_dir}/results/docker/pmm-server-${pmm_version}.docker diff --git a/cli-tests/CONTRIBUTING.md b/cli-tests/CONTRIBUTING.md new file mode 100644 index 0000000000..093b7ae967 --- /dev/null +++ b/cli-tests/CONTRIBUTING.md @@ -0,0 +1,51 @@ +# Contributing + +### Project Licenses + +- All modules use [Apache License v2.0](LICENSE.md). + +## Test suite structure + +### Assertions + +* **Assertion can be used directly in a test.** + Playwright provides wide [matchers list](https://playwright.dev/docs/test-assertions) that can be used directly inside a test. + + + +* **Declaration of a test variable** should be done on top of the test + +### Test Grouping + +* **One feature per spec file.** +This will help to split tests and order the execution for workers parallel mode. It will provide control over test execution time and increase maintainability of test suite. 
+ + +## Coding Conventions + +### Naming Conventions + +* **Acronyms** + Whenever an acronym is included as part of a type name or method name, keep the first + letter of the acronym uppercase and use lowercase for the rest of the acronym. Otherwise, + it becomes potentially very difficult to read or reason about the element without + reading documentation (if documentation even exists). + +       Consider for example a use case needing to support an HTTP URL. Calling the method + `getHTTPURL()` is absolutely horrible in terms of usability; whereas, `getHttpUrl()` is + great in terms of usability. The same applies for types `HTTPURLProvider` vs + `HttpUrlProvider`. + | + Whenever an acronym is included as part of a field name or parameter name: + * If the acronym comes at the start of the field or parameter name, use lowercase for the entire acronym, ex: `url; id;` + * Otherwise, keep the first letter of the acronym uppercase and use lowercase for the rest of the acronym, ex: `baseUrl; userId;` + + +* **Methods.** + * Methods should be named as actions with camelCase (changeSorting, changeGrouping, etc..) + * General preferable declaration style is _fat arrow function_, + ex: `const add = (a, b) => a + b;` + + +* **Test Files.** + Test files should be named with camelCase and auxiliary or search part should be added with dash, ex: `mongoDb-integration.spec.ts;` diff --git a/cli-tests/README.md b/cli-tests/README.md index 15c7daee63..cd66530e22 100644 --- a/cli-tests/README.md +++ b/cli-tests/README.md @@ -4,7 +4,7 @@ Percona Monitoring and Management CLI automated tests. 
## Getting Started -* Open the _**cli**_ folder in console +* Open the _**cli-tests**_ folder in console * Install Node.js 12+ version and make sure npx is included * Install project dependencies: `npm ci` * Install "playwright": `npx playwright install` @@ -31,6 +31,5 @@ Execute command in the Project Root folder ## Contributing - -_coming soon_ +For the specific contribution guidelines, please see [CONTRIBUTING.md](CONTRIBUTING.md). diff --git a/cli-tests/global-teardown.ts b/cli-tests/global-teardown.ts new file mode 100644 index 0000000000..683549f728 --- /dev/null +++ b/cli-tests/global-teardown.ts @@ -0,0 +1,8 @@ +import { FullConfig } from '@playwright/test'; +import { cleanUpDocker } from '@helpers/containers'; + +const globalTeardown = async (config: FullConfig) => { + await cleanUpDocker(); +}; + +export default globalTeardown; diff --git a/cli-tests/helpers/cliHelper.ts b/cli-tests/helpers/cliHelper.ts index 64cff7fb03..6daad4e85f 100644 --- a/cli-tests/helpers/cliHelper.ts +++ b/cli-tests/helpers/cliHelper.ts @@ -1,21 +1,21 @@ -import assert from 'assert'; import { test } from '@playwright/test'; -import Output from '@support/types/output'; +import Output from '@support/types/Output'; import * as shell from 'shelljs'; -export function verifyCommand(command: string, result = 'pass', getError = false): string { - // eslint-disable-next-line @typescript-eslint/no-unsafe-call,@typescript-eslint/no-unsafe-member-access,@typescript-eslint/no-unsafe-assignment - const { stdout, stderr, code } = shell.exec(command.replace(/(\r\n|\n|\r)/gm, ''), { silent: true }); - if (result === 'pass') { - assert.ok(code === 0, `The command ${command} was expected to run without any errors, the error found ${stderr}`); - } else { - assert.ok(code !== 0, `The command ${command} was expected to return with failure but found to be executing without any error, the return code found ${code}`); - } - - if (!getError) return stdout as string; - - return stderr as string; -} +/** + 
* Shell(sh) exec() wrapper to use outside {@link test} + * returns handy {@link Output} object. + * + * @param command sh command to execute + * @return {@link Output} instance + */ +export const execute = (command: string): Output => { + console.log(`exec: "${command}"`); + const { stdout, stderr, code } = shell.exec(command.replace(/(\r\n|\n|\r)/gm, ''), { silent: false }); + if (stdout.length > 0) console.log(`Out: "${stdout}"`); + if (stderr.length > 0) console.log(`Error: "${stderr}"`); + return new Output(command, code, stdout, stderr); +}; /** * Shell(sh) exec() wrapper to return handy {@link Output} object. @@ -23,12 +23,21 @@ export function verifyCommand(command: string, result = 'pass', getError = false * @param command sh command to execute * @return {@link Output} instance */ -export async function exec(command: string): Promise { - // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment +export const exec = async (command: string): Promise => { + return test.step(`Run "${command}" command`, async () => { + return execute(command); + }); +}; +/** + * Silent Shell(sh) exec() wrapper to return handy {@link Output} object. + * Provides no logs to skip huge outputs. 
+ * + * @param command sh command to execute + * @return {@link Output} instance + */ +export const execSilent = async (command: string): Promise => { const { stdout, stderr, code } = await test.step(`Run "${command}" command`, async () => { - // eslint-disable-next-line @typescript-eslint/no-unsafe-call,@typescript-eslint/no-unsafe-member-access,@typescript-eslint/no-unsafe-return return shell.exec(command.replace(/(\r\n|\n|\r)/gm, ''), { silent: false }); }); - - return new Output(command, code as number, stdout as string, stderr as string); -} + return new Output(command, code, stdout, stderr); +}; diff --git a/cli-tests/helpers/containers.ts b/cli-tests/helpers/containers.ts index ccc1e956df..5637a3296c 100644 --- a/cli-tests/helpers/containers.ts +++ b/cli-tests/helpers/containers.ts @@ -1,15 +1,52 @@ -import * as cli from "./cliHelper" +import * as cli from './cliHelper'; -export async function teardown(containers: string[], volumes: string[]): Promise { - const cmds = [] - for(const container of containers) { - cmds.push(`docker ps -a --format="{{.Names}}" | grep -E "${container}" | xargs --no-run-if-empty docker rm -f`) - } +export const teardown = async (containers: string[], volumes: string[]): Promise => { + const cmds: string[] = []; - for(const volume of volumes) { - cmds.push(`docker volume ls -q | grep -E "${volume}" | xargs --no-run-if-empty docker volume rm`) - } + for (const container of containers) { + cmds.push(`docker ps -a --format="{{.Names}}" | grep -E "${container}" | xargs --no-run-if-empty docker rm -f`); + } - await Promise.all(cmds.map(cmd => cli.exec(cmd))) - .catch(err => console.error(err)) -} + for (const volume of volumes) { + cmds.push(`docker volume ls -q | grep -E "${volume}" | xargs --no-run-if-empty docker volume rm`); + } + + await Promise.all(cmds.map((cmd) => cli.exec(cmd))) + .catch((err) => console.error(err)); +}; + +/** + * Removes docker containers and volumes found by default name patterns: "*pmm-server*" and 
"*pmm-data*" + */ +export const cleanUpDocker = async (): Promise => { + const cmds: string[] = []; + + for (const container of + cli.execute('docker ps -a --format="{{.Names}}" | grep -E "pmm-server"').getStdOutLines()) { + cmds.push(`docker ps -a --format="{{.Names}}" | grep -E "^${container}$" | xargs --no-run-if-empty docker rm -f`); + } + + for (const volume of cli.execute('docker volume ls -q | grep -E "pmm-data"').getStdOutLines()) { + cmds.push(`docker volume ls -q | grep -E "^${volume}$" | xargs --no-run-if-empty docker volume rm`); + } + await Promise.all(cmds.map((cmd) => cli.execute(cmd))) + .catch((err) => console.error(err)); +}; + +/** + * Removes code duplication in "pmm-cli/server/upgrade.spec.ts" + * Runs pmm-server using docker command with specified parameters. + * + * @param httpPort binds port 80 redirect + * @param httpsPort binds port 443 redirect + * @param volumeName name for the docker volume + * @param containerName name for the container + */ +export const runOldPmmServer = async (httpPort: number, httpsPort: number, volumeName: string, containerName: string) => { + await (await cli.exec(`docker run --detach --restart always + --publish ${httpPort}:80 + --publish ${httpsPort}:443 + -v ${volumeName}:/srv + --name ${containerName} + ${process.env.old_server_image}`)).assertSuccess(); +}; diff --git a/cli-tests/helpers/customAssertions.ts b/cli-tests/helpers/customAssertions.ts new file mode 100644 index 0000000000..eeb85ba93e --- /dev/null +++ b/cli-tests/helpers/customAssertions.ts @@ -0,0 +1,72 @@ +import { expect } from '@playwright/test'; +import PmmRestClient from '@support/types/PmmRestClient'; +import * as cli from './cliHelper'; + +/** + * Encapsulates all checks for running "PMM Server" container in docker. + * All verifications are taken based on specified parameters. 
+ * Object properties are optional and verifies each "pmm docker command" flag: + * { + * containerName: "verifies --container-name", + * imageName: "verifies --docker-image, requires containerName", + * volumeName: "verifies --volume-name", + * httpPort: "verifies --http-listen-port, requires adminPassword", + * httpsPort: "verifies --https-listen-port, requires adminPassword", + * adminPassword: "verifies --admin-password, embedded into ports check" + * } + * + * @param checks Object with checks to execute: + */ +export const verifyPmmServerProperties = async (checks: { + containerName?: string, + imageName?: string, + volumeName?: string, + httpPort?: number, + httpsPort?: number, + adminPassword?: string }) => { + // verify --container-name + if (checks.containerName !== undefined) { + await (await cli.exec('docker ps --format="{{.Names}}"')).outHasLine(checks.containerName); + } + + // verify --volume-name + if (checks.volumeName !== undefined) { + await (await cli.exec('docker volume ls --format="{{.Name}}"')).outHasLine(checks.volumeName); + } + + // verify --docker-image + if (checks.imageName !== undefined && checks.containerName !== undefined) { + await (await cli.exec('docker ps --format="{{.Names}} {{.Image}}"')) + .outHasLine(`${checks.containerName} ${checks.imageName}`); + } + + // verify --http-listen-port + if (checks.httpPort !== undefined && checks.adminPassword !== undefined) { + const httpClient = new PmmRestClient('admin', checks.adminPassword, checks.httpPort); + + await expect(async () => { + const resp = await httpClient.post('/v1/Settings/Get', {}); + await expect(resp, `http ${checks.httpPort} port and password should work`).toBeOK(); + expect(await resp.json(), 'response body should have "settings"').toHaveProperty('settings'); + }).toPass({ + // Probe, wait 1s, probe, wait 2s, probe, wait 2s, probe, wait 2s, probe, .... 
+ intervals: [1_000, 2_000, 2_000], + timeout: 60_000, + }); + } + + // verify --https-listen-port + if (checks.httpsPort !== undefined && checks.adminPassword !== undefined) { + const httpsClient = new PmmRestClient('admin', checks.adminPassword, checks.httpsPort, 'https'); + + await expect(async () => { + const resp = await httpsClient.post('/v1/Settings/Get', {}); + await expect(resp, `https ${checks.httpsPort} port and password should work`).toBeOK(); + expect(await resp.json(), 'response body should have "settings"').toHaveProperty('settings'); + }).toPass({ + // Probe, wait 1s, probe, wait 1s, probe, wait 2s, probe, wait 2s, probe, .... + intervals: [1_000, 2_000, 2_000], + timeout: 30_000, + }); + } +}; diff --git a/cli-tests/package-lock.json b/cli-tests/package-lock.json index 415939a043..995283807a 100644 --- a/cli-tests/package-lock.json +++ b/cli-tests/package-lock.json @@ -1,27 +1,31 @@ { "name": "cli", "version": "1.0.0", - "lockfileVersion": 2, + "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "cli", "version": "1.0.0", "dependencies": { - "@playwright/test": "^1.32.3", + "@playwright/test": "^1.34.2", + "@types/luxon": "^3.3.0", "dotenv": "^16.0.3", - "playwright": "^1.32.3", + "luxon": "^3.3.0", + "playwright": "^1.33.0", "promise-retry": "^2.0.1", "shelljs": "^0.8.5", "typescript": "^5.0.4" }, "devDependencies": { "@types/promise-retry": "^1.1.3", - "@typescript-eslint/eslint-plugin": "^5.59.0", - "@typescript-eslint/parser": "^5.59.0", - "eslint": "8.38", + "@types/shelljs": "^0.8.12", + "@typescript-eslint/eslint-plugin": "^5.59.7", + "@typescript-eslint/parser": "^5.59.6", + "eslint": "8.41", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-airbnb-typescript": "^17.0.0", + "eslint-plugin-import": "^2.27.5", "eslint-plugin-playwright": "^0.12.0" } }, @@ -50,14 +54,14 @@ } }, "node_modules/@eslint/eslintrc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.2.tgz", - 
"integrity": "sha512-3W4f5tDUra+pA+FzgugqL2pRimUTDJWKr7BINqOpkZrC0uYI0NIc0/JFgBROCU07HR6GieA5m3/rsPIhDmCXTQ==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.3.tgz", + "integrity": "sha512-+5gy6OQfk+xx3q0d6jGZZC3f3KzAkXc/IanVxd1is/VIIziRqqt3ongQz0FiTUXqTk0c7aDB3OaFuKnuSoJicQ==", "dev": true, "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", - "espree": "^9.5.1", + "espree": "^9.5.2", "globals": "^13.19.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", @@ -73,9 +77,9 @@ } }, "node_modules/@eslint/js": { - "version": "8.38.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.38.0.tgz", - "integrity": "sha512-IoD2MfUnOV58ghIHCiil01PcohxjbYR/qCxsoC+xNgUwh1EY8jOOrYmu3d3a71+tJJ23uscEV4X2HJWMsPJu4g==", + "version": "8.41.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.41.0.tgz", + "integrity": "sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -150,12 +154,12 @@ } }, "node_modules/@playwright/test": { - "version": "1.32.3", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.32.3.tgz", - "integrity": "sha512-BvWNvK0RfBriindxhLVabi8BRe3X0J9EVjKlcmhxjg4giWBD/xleLcg2dz7Tx0agu28rczjNIPQWznwzDwVsZQ==", + "version": "1.34.2", + "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.34.2.tgz", + "integrity": "sha512-v/LBnwzD0225q8xEv3t9DmNPX61yvNnEbiA8PoNk1fbkxApJFCWYLPpQbdVWzHaARdZD9g1PYBoOvnffortfKw==", "dependencies": { "@types/node": "*", - "playwright-core": "1.32.3" + "playwright-core": "1.34.2" }, "bin": { "playwright": "cli.js" @@ -167,6 +171,27 @@ "fsevents": "2.3.2" } }, + "node_modules/@playwright/test/node_modules/playwright-core": { + "version": "1.34.2", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.34.2.tgz", + "integrity": 
"sha512-MrkgGWLANc5qthXduvIY1a/tuafROyTORVd86fwKwgoYrmnBooN/GgeZSBm7ljTLV2FCWNSXV3se7qeScKn83g==", + "bin": { + "playwright": "cli.js" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@types/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==", + "dev": true, + "dependencies": { + "@types/minimatch": "*", + "@types/node": "*" + } + }, "node_modules/@types/json-schema": { "version": "7.0.11", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", @@ -177,8 +202,18 @@ "version": "0.0.29", "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", - "dev": true, - "peer": true + "dev": true + }, + "node_modules/@types/luxon": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@types/luxon/-/luxon-3.3.0.tgz", + "integrity": "sha512-uKRI5QORDnrGFYgcdAVnHvEIvEZ8noTpP/Bg+HeUzZghwinDlIS87DEenV5r1YoOF9G4x600YsUXLWZ19rmTmg==" + }, + "node_modules/@types/minimatch": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz", + "integrity": "sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==", + "dev": true }, "node_modules/@types/node": { "version": "18.11.9", @@ -201,21 +236,31 @@ "dev": true }, "node_modules/@types/semver": { - "version": "7.3.13", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", - "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", + "version": "7.5.0", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.0.tgz", + "integrity": 
"sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==", "dev": true }, + "node_modules/@types/shelljs": { + "version": "0.8.12", + "resolved": "https://registry.npmjs.org/@types/shelljs/-/shelljs-0.8.12.tgz", + "integrity": "sha512-ZA8U81/gldY+rR5zl/7HSHrG2KDfEb3lzG6uCUDhW1DTQE9yC/VBQ45fXnXq8f3CgInfhZmjtdu/WOUlrXRQUg==", + "dev": true, + "dependencies": { + "@types/glob": "~7.2.0", + "@types/node": "*" + } + }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.59.0.tgz", - "integrity": "sha512-p0QgrEyrxAWBecR56gyn3wkG15TJdI//eetInP3zYRewDh0XS+DhB3VUAd3QqvziFsfaQIoIuZMxZRB7vXYaYw==", + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.59.7.tgz", + "integrity": "sha512-BL+jYxUFIbuYwy+4fF86k5vdT9lT0CNJ6HtwrIvGh0PhH8s0yy5rjaKH2fDCrz5ITHy07WCzVGNvAmjJh4IJFA==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.4.0", - "@typescript-eslint/scope-manager": "5.59.0", - "@typescript-eslint/type-utils": "5.59.0", - "@typescript-eslint/utils": "5.59.0", + "@typescript-eslint/scope-manager": "5.59.7", + "@typescript-eslint/type-utils": "5.59.7", + "@typescript-eslint/utils": "5.59.7", "debug": "^4.3.4", "grapheme-splitter": "^1.0.4", "ignore": "^5.2.0", @@ -240,15 +285,62 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.7.tgz", + "integrity": "sha512-FL6hkYWK9zBGdxT2wWEd2W8ocXMu3K94i3gvMrjXpx+koFYdYV7KprKfirpgY34vTGzEPPuKoERpP8kD5h7vZQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "@typescript-eslint/visitor-keys": "5.59.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", 
+ "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.7.tgz", + "integrity": "sha512-UnVS2MRRg6p7xOSATscWkKjlf/NDKuqo5TdbWck6rIRZbmKpVNTLALzNvcjIfHBE7736kZOFc/4Z3VcZwuOM/A==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.7.tgz", + "integrity": "sha512-tyN+X2jvMslUszIiYbF0ZleP+RqQsFVpGrKI6e0Eet1w8WmhsAtmzaqm8oM8WJQ1ysLwhnsK/4hYHJjOgJVfQQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/parser": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.59.0.tgz", - "integrity": "sha512-qK9TZ70eJtjojSUMrrEwA9ZDQ4N0e/AuoOIgXuNBorXYcBDk397D2r5MIe1B3cok/oCtdNC5j+lUUpVB+Dpb+w==", + "version": "5.59.6", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.59.6.tgz", + "integrity": "sha512-7pCa6al03Pv1yf/dUg/s1pXz/yGMUBAw5EeWqNTFiSueKvRNonze3hma3lhdsOrQcaOXhbk5gKu2Fludiho9VA==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "5.59.0", - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/typescript-estree": "5.59.0", + "@typescript-eslint/scope-manager": "5.59.6", + "@typescript-eslint/types": "5.59.6", + "@typescript-eslint/typescript-estree": 
"5.59.6", "debug": "^4.3.4" }, "engines": { @@ -268,13 +360,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.0.tgz", - "integrity": "sha512-tsoldKaMh7izN6BvkK6zRMINj4Z2d6gGhO2UsI8zGZY3XhLq1DndP3Ycjhi1JwdwPRwtLMW4EFPgpuKhbCGOvQ==", + "version": "5.59.6", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.6.tgz", + "integrity": "sha512-gLbY3Le9Dxcb8KdpF0+SJr6EQ+hFGYFl6tVY8VxLPFDfUZC7BHFw+Vq7bM5lE9DwWPfx4vMWWTLGXgpc0mAYyQ==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/visitor-keys": "5.59.0" + "@typescript-eslint/types": "5.59.6", + "@typescript-eslint/visitor-keys": "5.59.6" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -285,13 +377,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.59.0.tgz", - "integrity": "sha512-d/B6VSWnZwu70kcKQSCqjcXpVH+7ABKH8P1KNn4K7j5PXXuycZTPXF44Nui0TEm6rbWGi8kc78xRgOC4n7xFgA==", + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.59.7.tgz", + "integrity": "sha512-ozuz/GILuYG7osdY5O5yg0QxXUAEoI4Go3Do5xeu+ERH9PorHBPSdvD3Tjp2NN2bNLh1NJQSsQu2TPu/Ly+HaQ==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "5.59.0", - "@typescript-eslint/utils": "5.59.0", + "@typescript-eslint/typescript-estree": "5.59.7", + "@typescript-eslint/utils": "5.59.7", "debug": "^4.3.4", "tsutils": "^3.21.0" }, @@ -311,10 +403,67 @@ } } }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.7.tgz", + "integrity": 
"sha512-UnVS2MRRg6p7xOSATscWkKjlf/NDKuqo5TdbWck6rIRZbmKpVNTLALzNvcjIfHBE7736kZOFc/4Z3VcZwuOM/A==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.7.tgz", + "integrity": "sha512-4A1NtZ1I3wMN2UGDkU9HMBL+TIQfbrh4uS0WDMMpf3xMRursDbqEf1ahh6vAAe3mObt8k3ZATnezwG4pdtWuUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "@typescript-eslint/visitor-keys": "5.59.7", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.7.tgz", + "integrity": "sha512-tyN+X2jvMslUszIiYbF0ZleP+RqQsFVpGrKI6e0Eet1w8WmhsAtmzaqm8oM8WJQ1ysLwhnsK/4hYHJjOgJVfQQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/types": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.0.tgz", - "integrity": 
"sha512-yR2h1NotF23xFFYKHZs17QJnB51J/s+ud4PYU4MqdZbzeNxpgUr05+dNeCN/bb6raslHvGdd6BFCkVhpPk/ZeA==", + "version": "5.59.6", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.6.tgz", + "integrity": "sha512-tH5lBXZI7T2MOUgOWFdVNUILsI02shyQvfzG9EJkoONWugCG77NDDa1EeDGw7oJ5IvsTAAGVV8I3Tk2PNu9QfA==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -325,13 +474,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.0.tgz", - "integrity": "sha512-sUNnktjmI8DyGzPdZ8dRwW741zopGxltGs/SAPgGL/AAgDpiLsCFLcMNSpbfXfmnNeHmK9h3wGmCkGRGAoUZAg==", + "version": "5.59.6", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.6.tgz", + "integrity": "sha512-vW6JP3lMAs/Tq4KjdI/RiHaaJSO7IUsbkz17it/Rl9Q+WkQ77EOuOnlbaU8kKfVIOJxMhnRiBG+olE7f3M16DA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/visitor-keys": "5.59.0", + "@typescript-eslint/types": "5.59.6", + "@typescript-eslint/visitor-keys": "5.59.6", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -352,17 +501,17 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.59.0.tgz", - "integrity": "sha512-GGLFd+86drlHSvPgN/el6dRQNYYGOvRSDVydsUaQluwIW3HvbXuxyuD5JETvBt/9qGYe+lOrDk6gRrWOHb/FvA==", + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.59.7.tgz", + "integrity": "sha512-yCX9WpdQKaLufz5luG4aJbOpdXf/fjwGMcLFXZVPUz3QqLirG5QcwwnIHNf8cjLjxK4qtzTO8udUtMQSAToQnQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@types/json-schema": "^7.0.9", "@types/semver": "^7.3.12", - "@typescript-eslint/scope-manager": "5.59.0", - "@typescript-eslint/types": "5.59.0", - 
"@typescript-eslint/typescript-estree": "5.59.0", + "@typescript-eslint/scope-manager": "5.59.7", + "@typescript-eslint/types": "5.59.7", + "@typescript-eslint/typescript-estree": "5.59.7", "eslint-scope": "^5.1.1", "semver": "^7.3.7" }, @@ -377,13 +526,87 @@ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.7.tgz", + "integrity": "sha512-FL6hkYWK9zBGdxT2wWEd2W8ocXMu3K94i3gvMrjXpx+koFYdYV7KprKfirpgY34vTGzEPPuKoERpP8kD5h7vZQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "@typescript-eslint/visitor-keys": "5.59.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.7.tgz", + "integrity": "sha512-UnVS2MRRg6p7xOSATscWkKjlf/NDKuqo5TdbWck6rIRZbmKpVNTLALzNvcjIfHBE7736kZOFc/4Z3VcZwuOM/A==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.7.tgz", + "integrity": "sha512-4A1NtZ1I3wMN2UGDkU9HMBL+TIQfbrh4uS0WDMMpf3xMRursDbqEf1ahh6vAAe3mObt8k3ZATnezwG4pdtWuUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "@typescript-eslint/visitor-keys": "5.59.7", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": 
"^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "5.59.7", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.7.tgz", + "integrity": "sha512-tyN+X2jvMslUszIiYbF0ZleP+RqQsFVpGrKI6e0Eet1w8WmhsAtmzaqm8oM8WJQ1ysLwhnsK/4hYHJjOgJVfQQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.7", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.0.tgz", - "integrity": "sha512-qZ3iXxQhanchCeaExlKPV3gDQFxMUmU35xfd5eCXB6+kUw1TUAbIy2n7QIrwz9s98DQLzNWyHp61fY0da4ZcbA==", + "version": "5.59.6", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.6.tgz", + "integrity": "sha512-zEfbFLzB9ETcEJ4HZEEsCR9HHeNku5/Qw1jSS5McYJv5BR+ftYXwFFAH5Al+xkGaZEqowMwl7uoJjQb1YSPF8Q==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.59.0", + "@typescript-eslint/types": "5.59.6", "eslint-visitor-keys": "^3.3.0" }, "engines": { @@ -466,7 +689,6 @@ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", "integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", "dev": true, - "peer": true, "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -495,7 +717,24 @@ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", 
"integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", "dev": true, - "peer": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", + "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", + "dev": true, "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -735,7 +974,6 @@ "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", "dev": true, - "peer": true, "dependencies": { "has": "^1.0.3" } @@ -770,15 +1008,15 @@ } }, "node_modules/eslint": { - "version": "8.38.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.38.0.tgz", - "integrity": "sha512-pIdsD2jwlUGf/U38Jv97t8lq6HpaU/G9NKbYmpWpZGw3LdTNhZLbJePqxOXGB5+JEKfOPU/XLxYxFh03nr1KTg==", + "version": "8.41.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.41.0.tgz", + "integrity": "sha512-WQDQpzGBOP5IrXPo4Hc0814r4/v2rrIsB0rhT7jtunIalgg6gYXWhRMOejVO8yH21T/FGaxjmFjBMNqcIlmH1Q==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.0.2", - "@eslint/js": "8.38.0", + "@eslint/eslintrc": "^2.0.3", + "@eslint/js": "8.41.0", "@humanwhocodes/config-array": "^0.11.8", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", @@ -788,9 +1026,9 @@ "debug": "^4.3.2", "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.1.1", - 
"eslint-visitor-keys": "^3.4.0", - "espree": "^9.5.1", + "eslint-scope": "^7.2.0", + "eslint-visitor-keys": "^3.4.1", + "espree": "^9.5.2", "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -798,13 +1036,12 @@ "find-up": "^5.0.0", "glob-parent": "^6.0.2", "globals": "^13.19.0", - "grapheme-splitter": "^1.0.4", + "graphemer": "^1.4.0", "ignore": "^5.2.0", "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", - "js-sdsl": "^4.1.4", "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", "levn": "^0.4.1", @@ -870,14 +1107,14 @@ } }, "node_modules/eslint-import-resolver-node": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz", - "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==", + "version": "0.3.7", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", + "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", "dev": true, - "peer": true, "dependencies": { "debug": "^3.2.7", - "resolve": "^1.20.0" + "is-core-module": "^2.11.0", + "resolve": "^1.22.1" } }, "node_modules/eslint-import-resolver-node/node_modules/debug": { @@ -885,7 +1122,6 @@ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dev": true, - "peer": true, "dependencies": { "ms": "^2.1.1" } @@ -895,7 +1131,6 @@ "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz", "integrity": "sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==", "dev": true, - "peer": true, "dependencies": { "debug": "^3.2.7" }, @@ -913,30 +1148,30 @@ "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dev": true, - "peer": true, "dependencies": { "ms": "^2.1.1" } }, "node_modules/eslint-plugin-import": { - "version": "2.26.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.26.0.tgz", - "integrity": "sha512-hYfi3FXaM8WPLf4S1cikh/r4IxnO6zrhZbEGz2b660EJRbuxgpDS5gkCuYgGWg2xxh2rBuIr4Pvhve/7c31koA==", + "version": "2.27.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", + "integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", "dev": true, - "peer": true, "dependencies": { - "array-includes": "^3.1.4", - "array.prototype.flat": "^1.2.5", - "debug": "^2.6.9", + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "array.prototype.flatmap": "^1.3.1", + "debug": "^3.2.7", "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.6", - "eslint-module-utils": "^2.7.3", + "eslint-import-resolver-node": "^0.3.7", + "eslint-module-utils": "^2.7.4", "has": "^1.0.3", - "is-core-module": "^2.8.1", + "is-core-module": "^2.11.0", "is-glob": "^4.0.3", "minimatch": "^3.1.2", - "object.values": "^1.1.5", - "resolve": "^1.22.0", + "object.values": "^1.1.6", + "resolve": "^1.22.1", + "semver": "^6.3.0", "tsconfig-paths": "^3.14.1" }, "engines": { @@ -947,13 +1182,12 @@ } }, "node_modules/eslint-plugin-import/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "dev": true, - "peer": true, 
"dependencies": { - "ms": "2.0.0" + "ms": "^2.1.1" } }, "node_modules/eslint-plugin-import/node_modules/doctrine": { @@ -961,7 +1195,6 @@ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", "dev": true, - "peer": true, "dependencies": { "esutils": "^2.0.2" }, @@ -969,12 +1202,14 @@ "node": ">=0.10.0" } }, - "node_modules/eslint-plugin-import/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "node_modules/eslint-plugin-import/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", "dev": true, - "peer": true + "bin": { + "semver": "bin/semver.js" + } }, "node_modules/eslint-plugin-playwright": { "version": "0.12.0", @@ -1005,9 +1240,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.0.tgz", - "integrity": "sha512-HPpKPUBQcAsZOsHAFwTtIKcYlCje62XB7SEAcxjtmW6TD1WVpkS6i6/hOVtTZIl4zGj/mBqpFVGvaDneik+VoQ==", + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz", + "integrity": "sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -1017,9 +1252,9 @@ } }, "node_modules/eslint/node_modules/eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", + 
"version": "7.2.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.0.tgz", + "integrity": "sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==", "dev": true, "dependencies": { "esrecurse": "^4.3.0", @@ -1027,6 +1262,9 @@ }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint/node_modules/estraverse": { @@ -1039,14 +1277,14 @@ } }, "node_modules/espree": { - "version": "9.5.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.1.tgz", - "integrity": "sha512-5yxtHSZXRSW5pvv3hAlXM5+/Oswi1AUFqBmbibKb5s6bp3rGIDkyXU6xCoyuuLhijr4SFwPrXRoZjz0AZDN9tg==", + "version": "9.5.2", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.2.tgz", + "integrity": "sha512-7OASN1Wma5fum5SrNhFMAMJxOUAbhyfQ8dQ//PJaJbNw0URTPWqIghHWt1MmAANKhHZIYOHruW4Kw4ruUWOdGw==", "dev": true, "dependencies": { "acorn": "^8.8.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.0" + "eslint-visitor-keys": "^3.4.1" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -1381,6 +1619,12 @@ "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", "dev": true }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, "node_modules/has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", @@ -1727,16 +1971,6 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, - "node_modules/js-sdsl": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.3.0.tgz", - "integrity": 
"sha512-mifzlm2+5nZ+lEcLJMoBK0/IH/bDg8XnJfd/Wq6IP+xoCjLZsTOnV2QpxlVbX9bMnkl5PdEjNtBJ9Cj1NjifhQ==", - "dev": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, "node_modules/js-yaml": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", @@ -1766,7 +2000,6 @@ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", "dev": true, - "peer": true, "dependencies": { "minimist": "^1.2.0" }, @@ -1820,6 +2053,14 @@ "node": ">=10" } }, + "node_modules/luxon": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.3.0.tgz", + "integrity": "sha512-An0UCfG/rSiqtAIiBPO0Y9/zAnHUZxAMiCpTd5h2smgsj7GGmcenvrvww2cqNA8/4A5ZrD1gJpHN2mIHZQF+Mg==", + "engines": { + "node": ">=12" + } + }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", @@ -1858,7 +2099,6 @@ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", "dev": true, - "peer": true, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -1936,7 +2176,6 @@ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", "dev": true, - "peer": true, "dependencies": { "call-bind": "^1.0.2", "define-properties": "^1.1.4", @@ -2069,12 +2308,12 @@ } }, "node_modules/playwright": { - "version": "1.32.3", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.32.3.tgz", - "integrity": "sha512-h/ylpgoj6l/EjkfUDyx8cdOlfzC96itPpPe8BXacFkqpw/YsuxkpPyVbzEq4jw+bAJh5FLgh31Ljg2cR6HV3uw==", + "version": "1.33.0", + "resolved": 
"https://registry.npmjs.org/playwright/-/playwright-1.33.0.tgz", + "integrity": "sha512-+zzU3V2TslRX2ETBRgQKsKytYBkJeLZ2xzUj4JohnZnxQnivoUvOvNbRBYWSYykQTO0Y4zb8NwZTYFUO+EpPBQ==", "hasInstallScript": true, "dependencies": { - "playwright-core": "1.32.3" + "playwright-core": "1.33.0" }, "bin": { "playwright": "cli.js" @@ -2084,9 +2323,9 @@ } }, "node_modules/playwright-core": { - "version": "1.32.3", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.32.3.tgz", - "integrity": "sha512-SB+cdrnu74ZIn5Ogh/8278ngEh9NEEV0vR4sJFmK04h2iZpybfbqBY0bX6+BLYWVdV12JLLI+JEFtSnYgR+mWg==", + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.33.0.tgz", + "integrity": "sha512-aizyPE1Cj62vAECdph1iaMILpT0WUDCq3E6rW6I+dleSbBoGbktvJtzS6VHkZ4DKNEOG9qJpiom/ZxO+S15LAw==", "bin": { "playwright": "cli.js" }, @@ -2387,7 +2626,6 @@ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", "dev": true, - "peer": true, "engines": { "node": ">=4" } @@ -2450,7 +2688,6 @@ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz", "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==", "dev": true, - "peer": true, "dependencies": { "@types/json5": "^0.0.29", "json5": "^1.0.1", @@ -2602,1826 +2839,5 @@ "url": "https://github.com/sponsors/sindresorhus" } } - }, - "dependencies": { - "@eslint-community/eslint-utils": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.2.0.tgz", - "integrity": "sha512-gB8T4H4DEfX2IV9zGDJPOBgP1e/DbfCPDTtEqUMckpvzS1OYtva8JdFYBqMwYk7xAQ429WGF/UPqn8uQ//h2vQ==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^3.3.0" - } - }, - "@eslint-community/regexpp": { - "version": "4.4.0", - "resolved": 
"https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.4.0.tgz", - "integrity": "sha512-A9983Q0LnDGdLPjxyXQ00sbV+K+O+ko2Dr+CZigbHWtX9pNfxlaBkMR8X1CztI73zuEyEBXTVjx7CE+/VSwDiQ==", - "dev": true - }, - "@eslint/eslintrc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.2.tgz", - "integrity": "sha512-3W4f5tDUra+pA+FzgugqL2pRimUTDJWKr7BINqOpkZrC0uYI0NIc0/JFgBROCU07HR6GieA5m3/rsPIhDmCXTQ==", - "dev": true, - "requires": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.5.1", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - } - }, - "@eslint/js": { - "version": "8.38.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.38.0.tgz", - "integrity": "sha512-IoD2MfUnOV58ghIHCiil01PcohxjbYR/qCxsoC+xNgUwh1EY8jOOrYmu3d3a71+tJJ23uscEV4X2HJWMsPJu4g==", - "dev": true - }, - "@humanwhocodes/config-array": { - "version": "0.11.8", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", - "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", - "dev": true, - "requires": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", - "minimatch": "^3.0.5" - } - }, - "@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", - "dev": true - }, - "@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", - "dev": true - }, - "@nodelib/fs.scandir": { - "version": "2.1.5", - 
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - } - }, - "@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true - }, - "@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "requires": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - } - }, - "@playwright/test": { - "version": "1.32.3", - "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.32.3.tgz", - "integrity": "sha512-BvWNvK0RfBriindxhLVabi8BRe3X0J9EVjKlcmhxjg4giWBD/xleLcg2dz7Tx0agu28rczjNIPQWznwzDwVsZQ==", - "requires": { - "@types/node": "*", - "fsevents": "2.3.2", - "playwright-core": "1.32.3" - } - }, - "@types/json-schema": { - "version": "7.0.11", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", - "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", - "dev": true - }, - "@types/json5": { - "version": "0.0.29", - "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", - "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", - "dev": true, - "peer": true - }, - "@types/node": { - "version": "18.11.9", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz", - "integrity": 
"sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==" - }, - "@types/promise-retry": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@types/promise-retry/-/promise-retry-1.1.3.tgz", - "integrity": "sha512-LxIlEpEX6frE3co3vCO2EUJfHIta1IOmhDlcAsR4GMMv9hev1iTI9VwberVGkePJAuLZs5rMucrV8CziCfuJMw==", - "dev": true, - "requires": { - "@types/retry": "*" - } - }, - "@types/retry": { - "version": "0.12.2", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.2.tgz", - "integrity": "sha512-XISRgDJ2Tc5q4TRqvgJtzsRkFYNJzZrhTdtMoGVBttwzzQJkPnS3WWTFc7kuDRoPtPakl+T+OfdEUjYJj7Jbow==", - "dev": true - }, - "@types/semver": { - "version": "7.3.13", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", - "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", - "dev": true - }, - "@typescript-eslint/eslint-plugin": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.59.0.tgz", - "integrity": "sha512-p0QgrEyrxAWBecR56gyn3wkG15TJdI//eetInP3zYRewDh0XS+DhB3VUAd3QqvziFsfaQIoIuZMxZRB7vXYaYw==", - "dev": true, - "requires": { - "@eslint-community/regexpp": "^4.4.0", - "@typescript-eslint/scope-manager": "5.59.0", - "@typescript-eslint/type-utils": "5.59.0", - "@typescript-eslint/utils": "5.59.0", - "debug": "^4.3.4", - "grapheme-splitter": "^1.0.4", - "ignore": "^5.2.0", - "natural-compare-lite": "^1.4.0", - "semver": "^7.3.7", - "tsutils": "^3.21.0" - } - }, - "@typescript-eslint/parser": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.59.0.tgz", - "integrity": "sha512-qK9TZ70eJtjojSUMrrEwA9ZDQ4N0e/AuoOIgXuNBorXYcBDk397D2r5MIe1B3cok/oCtdNC5j+lUUpVB+Dpb+w==", - "dev": true, - "requires": { - "@typescript-eslint/scope-manager": "5.59.0", - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/typescript-estree": 
"5.59.0", - "debug": "^4.3.4" - } - }, - "@typescript-eslint/scope-manager": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.0.tgz", - "integrity": "sha512-tsoldKaMh7izN6BvkK6zRMINj4Z2d6gGhO2UsI8zGZY3XhLq1DndP3Ycjhi1JwdwPRwtLMW4EFPgpuKhbCGOvQ==", - "dev": true, - "requires": { - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/visitor-keys": "5.59.0" - } - }, - "@typescript-eslint/type-utils": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.59.0.tgz", - "integrity": "sha512-d/B6VSWnZwu70kcKQSCqjcXpVH+7ABKH8P1KNn4K7j5PXXuycZTPXF44Nui0TEm6rbWGi8kc78xRgOC4n7xFgA==", - "dev": true, - "requires": { - "@typescript-eslint/typescript-estree": "5.59.0", - "@typescript-eslint/utils": "5.59.0", - "debug": "^4.3.4", - "tsutils": "^3.21.0" - } - }, - "@typescript-eslint/types": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.0.tgz", - "integrity": "sha512-yR2h1NotF23xFFYKHZs17QJnB51J/s+ud4PYU4MqdZbzeNxpgUr05+dNeCN/bb6raslHvGdd6BFCkVhpPk/ZeA==", - "dev": true - }, - "@typescript-eslint/typescript-estree": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.0.tgz", - "integrity": "sha512-sUNnktjmI8DyGzPdZ8dRwW741zopGxltGs/SAPgGL/AAgDpiLsCFLcMNSpbfXfmnNeHmK9h3wGmCkGRGAoUZAg==", - "dev": true, - "requires": { - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/visitor-keys": "5.59.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.3.7", - "tsutils": "^3.21.0" - } - }, - "@typescript-eslint/utils": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.59.0.tgz", - "integrity": "sha512-GGLFd+86drlHSvPgN/el6dRQNYYGOvRSDVydsUaQluwIW3HvbXuxyuD5JETvBt/9qGYe+lOrDk6gRrWOHb/FvA==", - "dev": true, - "requires": { - 
"@eslint-community/eslint-utils": "^4.2.0", - "@types/json-schema": "^7.0.9", - "@types/semver": "^7.3.12", - "@typescript-eslint/scope-manager": "5.59.0", - "@typescript-eslint/types": "5.59.0", - "@typescript-eslint/typescript-estree": "5.59.0", - "eslint-scope": "^5.1.1", - "semver": "^7.3.7" - } - }, - "@typescript-eslint/visitor-keys": { - "version": "5.59.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.0.tgz", - "integrity": "sha512-qZ3iXxQhanchCeaExlKPV3gDQFxMUmU35xfd5eCXB6+kUw1TUAbIy2n7QIrwz9s98DQLzNWyHp61fY0da4ZcbA==", - "dev": true, - "requires": { - "@typescript-eslint/types": "5.59.0", - "eslint-visitor-keys": "^3.3.0" - } - }, - "acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", - "dev": true - }, - "acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "dev": true, - "requires": {} - }, - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", - "dev": true, - "peer": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", - "is-string": "^1.0.7" - } - }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true - }, - "array.prototype.flat": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", - "dev": true, - "peer": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0" - } - }, - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "requires": { - "fill-range": "^7.0.1" - } - }, - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "resolved": 
"https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" - }, - "confusing-browser-globals": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz", - "integrity": "sha512-JsPKdmh8ZkmnHxDk55FZ1TqVLvEQTvoByJZRN9jzI0UjxK/QgAmsphz7PGtqgPieQZ/CQcHWXCR7ATDNhGe+YA==", - "dev": true - }, - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "requires": { - "ms": "2.1.2" - } - }, - "deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true - }, - "define-properties": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.4.tgz", - "integrity": "sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==", - "dev": true, - "requires": { - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - } - }, - "dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "requires": { - "path-type": "^4.0.0" - } - }, - 
"doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "dotenv": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", - "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==" - }, - "err-code": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", - "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==" - }, - "es-abstract": { - "version": "1.20.4", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.20.4.tgz", - "integrity": "sha512-0UtvRN79eMe2L+UNEF1BwRe364sj/DXhQ/k5FmivgoSdpM90b8Jc0mDzKMGo7QS0BVbOP/bTwBKNnDc9rNzaPA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.1.3", - "get-symbol-description": "^1.0.0", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "is-string": "^1.0.7", - "is-weakref": "^1.0.2", - "object-inspect": "^1.12.2", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trimend": "^1.0.5", - "string.prototype.trimstart": "^1.0.5", - "unbox-primitive": "^1.0.2" - } - }, - "es-shim-unscopables": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": 
"sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", - "dev": true, - "peer": true, - "requires": { - "has": "^1.0.3" - } - }, - "es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true - }, - "eslint": { - "version": "8.38.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.38.0.tgz", - "integrity": "sha512-pIdsD2jwlUGf/U38Jv97t8lq6HpaU/G9NKbYmpWpZGw3LdTNhZLbJePqxOXGB5+JEKfOPU/XLxYxFh03nr1KTg==", - "dev": true, - "requires": { - "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.0.2", - "@eslint/js": "8.38.0", - "@humanwhocodes/config-array": "^0.11.8", - "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.3.2", - "doctrine": "^3.0.0", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.1.1", - "eslint-visitor-keys": "^3.4.0", - "espree": "^9.5.1", - "esquery": "^1.4.2", - "esutils": "^2.0.2", - "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "grapheme-splitter": "^1.0.4", - "ignore": "^5.2.0", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-sdsl": "^4.1.4", - "js-yaml": "^4.1.0", - 
"json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", - "text-table": "^0.2.0" - }, - "dependencies": { - "eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", - "dev": true, - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - } - }, - "estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true - } - } - }, - "eslint-config-airbnb-base": { - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-airbnb-base/-/eslint-config-airbnb-base-15.0.0.tgz", - "integrity": "sha512-xaX3z4ZZIcFLvh2oUNvcX5oEofXda7giYmuplVxoOg5A7EXJMrUyqRgR+mhDhPK8LZ4PttFOBvCYDbX3sUoUig==", - "dev": true, - "requires": { - "confusing-browser-globals": "^1.0.10", - "object.assign": "^4.1.2", - "object.entries": "^1.1.5", - "semver": "^6.3.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "eslint-config-airbnb-typescript": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/eslint-config-airbnb-typescript/-/eslint-config-airbnb-typescript-17.0.0.tgz", - "integrity": "sha512-elNiuzD0kPAPTXjFWg+lE24nMdHMtuxgYoD30OyMD6yrW1AhFZPAg27VX7d3tzOErw+dgJTNWfRSDqEcXb4V0g==", - "dev": true, - "requires": { - "eslint-config-airbnb-base": "^15.0.0" - } - }, - "eslint-import-resolver-node": { - 
"version": "0.3.6", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz", - "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==", - "dev": true, - "peer": true, - "requires": { - "debug": "^3.2.7", - "resolve": "^1.20.0" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "peer": true, - "requires": { - "ms": "^2.1.1" - } - } - } - }, - "eslint-module-utils": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz", - "integrity": "sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==", - "dev": true, - "peer": true, - "requires": { - "debug": "^3.2.7" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "peer": true, - "requires": { - "ms": "^2.1.1" - } - } - } - }, - "eslint-plugin-import": { - "version": "2.26.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.26.0.tgz", - "integrity": "sha512-hYfi3FXaM8WPLf4S1cikh/r4IxnO6zrhZbEGz2b660EJRbuxgpDS5gkCuYgGWg2xxh2rBuIr4Pvhve/7c31koA==", - "dev": true, - "peer": true, - "requires": { - "array-includes": "^3.1.4", - "array.prototype.flat": "^1.2.5", - "debug": "^2.6.9", - "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.6", - "eslint-module-utils": "^2.7.3", - "has": "^1.0.3", - "is-core-module": "^2.8.1", - "is-glob": "^4.0.3", - "minimatch": "^3.1.2", - "object.values": "^1.1.5", - "resolve": "^1.22.0", - "tsconfig-paths": "^3.14.1" - }, - 
"dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "peer": true, - "requires": { - "ms": "2.0.0" - } - }, - "doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "dev": true, - "peer": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "dev": true, - "peer": true - } - } - }, - "eslint-plugin-playwright": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-playwright/-/eslint-plugin-playwright-0.12.0.tgz", - "integrity": "sha512-KXuzQjVzca5irMT/7rvzJKsVDGbQr43oQPc8i+SLEBqmfrTxlwMwRqfv9vtZqh4hpU0jmrnA/EOfwtls+5QC1w==", - "dev": true, - "requires": {} - }, - "eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dev": true, - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - } - }, - "eslint-visitor-keys": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.0.tgz", - "integrity": "sha512-HPpKPUBQcAsZOsHAFwTtIKcYlCje62XB7SEAcxjtmW6TD1WVpkS6i6/hOVtTZIl4zGj/mBqpFVGvaDneik+VoQ==", - "dev": true - }, - "espree": { - "version": "9.5.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.1.tgz", - "integrity": 
"sha512-5yxtHSZXRSW5pvv3hAlXM5+/Oswi1AUFqBmbibKb5s6bp3rGIDkyXU6xCoyuuLhijr4SFwPrXRoZjz0AZDN9tg==", - "dev": true, - "requires": { - "acorn": "^8.8.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.0" - } - }, - "esquery": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.2.tgz", - "integrity": "sha512-JVSoLdTlTDkmjFmab7H/9SL9qGSyjElT3myyKp7krqjVFQCDLmj1QFaCLRFBszBKI0XVZaiiXvuPIX3ZwHe1Ng==", - "dev": true, - "requires": { - "estraverse": "^5.1.0" - }, - "dependencies": { - "estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true - } - } - }, - "esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "requires": { - "estraverse": "^5.2.0" - }, - "dependencies": { - "estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true - } - } - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - }, - "esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - "fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - }, - "dependencies": { - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "requires": { - "is-glob": "^4.0.1" - } - } - } - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true - }, - "fastq": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz", - "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", - "dev": true, - "requires": { - "reusify": "^1.0.4" - } - }, - "file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", - "dev": true, - "requires": { - "flat-cache": 
"^3.0.4" - } - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - "requires": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - } - }, - "flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", - "dev": true, - "requires": { - "flatted": "^3.1.0", - "rimraf": "^3.0.2" - } - }, - "flatted": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", - "dev": true - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" - }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "function.prototype.name": { - "version": "1.1.5", - "resolved": 
"https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" - } - }, - "functions-have-names": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", - "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", - "dev": true - }, - "get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "get-symbol-description": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" - } - }, - "glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - 
"requires": { - "is-glob": "^4.0.3" - } - }, - "globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", - "dev": true, - "requires": { - "type-fest": "^0.20.2" - } - }, - "globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - } - }, - "grapheme-splitter": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", - "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", - "dev": true - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-bigints": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", - "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": 
"sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", - "dev": true, - "requires": { - "get-intrinsic": "^1.1.1" - } - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "dev": true - }, - "has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", - "dev": true, - "requires": { - "has-symbols": "^1.0.2" - } - }, - "ignore": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz", - "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==", - "dev": true - }, - "import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dev": true, - "requires": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - } - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": 
"sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "internal-slot": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", - "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", - "dev": true, - "requires": { - "get-intrinsic": "^1.1.0", - "has": "^1.0.3", - "side-channel": "^1.0.4" - } - }, - "interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" - }, - "is-bigint": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", - "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", - "dev": true, - "requires": { - "has-bigints": "^1.0.1" - } - }, - "is-boolean-object": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", - "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-callable": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", - "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", - "dev": true - }, - "is-core-module": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", - "integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", - "requires": { - "has": "^1.0.3" - } - }, - "is-date-object": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", - "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", - "dev": true, - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true - }, - "is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-negative-zero": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", - "dev": true - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true - }, - "is-number-object": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", - "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", - "dev": true, - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true - }, - "is-regex": { - "version": "1.1.4", - "resolved": 
"https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", - "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-shared-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2" - } - }, - "is-string": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", - "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", - "dev": true, - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-symbol": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", - "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", - "dev": true, - "requires": { - "has-symbols": "^1.0.2" - } - }, - "is-weakref": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", - "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2" - } - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true - }, - "js-sdsl": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.3.0.tgz", - "integrity": "sha512-mifzlm2+5nZ+lEcLJMoBK0/IH/bDg8XnJfd/Wq6IP+xoCjLZsTOnV2QpxlVbX9bMnkl5PdEjNtBJ9Cj1NjifhQ==", - "dev": true - }, - "js-yaml": { - "version": 
"4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "requires": { - "argparse": "^2.0.1" - } - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true - }, - "json5": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", - "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", - "dev": true, - "peer": true, - "requires": { - "minimist": "^1.2.0" - } - }, - "levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - } - }, - "locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "requires": { - "p-locate": "^5.0.0" - } - }, - "lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": 
"sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true - }, - "micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", - "dev": true, - "requires": { - "braces": "^3.0.2", - "picomatch": "^2.3.1" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", - "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", - "dev": true, - "peer": true - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true 
- }, - "natural-compare-lite": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", - "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", - "dev": true - }, - "object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", - "dev": true - }, - "object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true - }, - "object.assign": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - } - }, - "object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.values": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", - "dev": true, - "peer": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "once": { - "version": "1.4.0", - "resolved": 
"https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "requires": { - "wrappy": "1" - } - }, - "optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", - "dev": true, - "requires": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - } - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "requires": { - "p-limit": "^3.0.2" - } - }, - "parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "requires": { - "callsites": "^3.0.0" - } - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": 
"sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==" - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true - }, - "path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" - }, - "path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true - }, - "picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true - }, - "playwright": { - "version": "1.32.3", - "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.32.3.tgz", - "integrity": "sha512-h/ylpgoj6l/EjkfUDyx8cdOlfzC96itPpPe8BXacFkqpw/YsuxkpPyVbzEq4jw+bAJh5FLgh31Ljg2cR6HV3uw==", - "requires": { - "playwright-core": "1.32.3" - } - }, - "playwright-core": { - "version": "1.32.3", - "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.32.3.tgz", - "integrity": "sha512-SB+cdrnu74ZIn5Ogh/8278ngEh9NEEV0vR4sJFmK04h2iZpybfbqBY0bX6+BLYWVdV12JLLI+JEFtSnYgR+mWg==" - }, - "prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true - }, - "promise-retry": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", 
- "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", - "requires": { - "err-code": "^2.0.2", - "retry": "^0.12.0" - } - }, - "punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", - "dev": true - }, - "queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true - }, - "rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", - "requires": { - "resolve": "^1.1.6" - } - }, - "regexp.prototype.flags": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" - } - }, - "resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", - "requires": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - } - }, - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true - }, - "retry": { - 
"version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==" - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, - "run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "requires": { - "queue-microtask": "^1.2.2" - } - }, - "safe-regex-test": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "is-regex": "^1.1.4" - } - }, - "semver": { - "version": "7.3.8", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", - "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, 
- "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true - }, - "shelljs": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz", - "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - } - }, - "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true - }, - "string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "strip-ansi": { - 
"version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", - "dev": true, - "peer": true - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==" - }, - "text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "requires": { - "is-number": "^7.0.0" - } - }, - 
"tsconfig-paths": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz", - "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==", - "dev": true, - "peer": true, - "requires": { - "@types/json5": "^0.0.29", - "json5": "^1.0.1", - "minimist": "^1.2.6", - "strip-bom": "^3.0.0" - } - }, - "tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "tsutils": { - "version": "3.21.0", - "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", - "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", - "dev": true, - "requires": { - "tslib": "^1.8.1" - } - }, - "type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1" - } - }, - "type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true - }, - "typescript": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.4.tgz", - "integrity": "sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==" - }, - "unbox-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", - "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", - "dev": true, - "requires": { - "call-bind": 
"^1.0.2", - "has-bigints": "^1.0.2", - "has-symbols": "^1.0.3", - "which-boxed-primitive": "^1.0.2" - } - }, - "uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "requires": { - "punycode": "^2.1.0" - } - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dev": true, - "requires": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, - "yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": 
"sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true - } } } diff --git a/cli-tests/package.json b/cli-tests/package.json index b0b74b3f26..8dbe7e4f18 100644 --- a/cli-tests/package.json +++ b/cli-tests/package.json @@ -12,20 +12,24 @@ "pmm": "pmm" }, "dependencies": { - "@playwright/test": "^1.32.3", + "@playwright/test": "^1.34.2", + "@types/luxon": "^3.3.0", "dotenv": "^16.0.3", - "playwright": "^1.32.3", + "luxon": "^3.3.0", + "playwright": "^1.33.0", "promise-retry": "^2.0.1", "shelljs": "^0.8.5", "typescript": "^5.0.4" }, "devDependencies": { "@types/promise-retry": "^1.1.3", - "@typescript-eslint/eslint-plugin": "^5.59.0", - "@typescript-eslint/parser": "^5.59.0", - "eslint": "8.38", + "@types/shelljs": "^0.8.12", + "@typescript-eslint/eslint-plugin": "^5.59.7", + "@typescript-eslint/parser": "^5.59.6", + "eslint": "8.41", "eslint-config-airbnb-base": "^15.0.0", "eslint-config-airbnb-typescript": "^17.0.0", + "eslint-plugin-import": "^2.27.5", "eslint-plugin-playwright": "^0.12.0" } } diff --git a/cli-tests/playwright.config.ts b/cli-tests/playwright.config.ts index 19cdc27606..1f66ed11dc 100644 --- a/cli-tests/playwright.config.ts +++ b/cli-tests/playwright.config.ts @@ -1,5 +1,4 @@ -import type { PlaywrightTestConfig } from '@playwright/test'; - +import { defineConfig, devices } from '@playwright/test'; import * as dotenv from 'dotenv'; /** @@ -13,13 +12,24 @@ dotenv.config(); * See https://playwright.dev/docs/test-configuration. 
*/ -const config: PlaywrightTestConfig = { +export default defineConfig({ testDir: './', timeout: 300_000, expect: { timeout: 2000, }, - + projects: [ + { + name: 'setup', + testMatch: 'support/pmm-bin.setup.ts', + }, + { + name: 'cli', + use: { ...devices['Desktop Chrome'] }, + dependencies: ['setup'], + }, + ], + globalTeardown: './global-teardown', /* Run tests in files in parallel */ fullyParallel: false, /* Fail the build on CI if you accidentally left test.only in the source code. */ @@ -27,8 +37,10 @@ const config: PlaywrightTestConfig = { /* Opt out of parallel tests on CI. */ workers: 6, /* Reporter to use. See https://playwright.dev/docs/test-reporters */ - reporter: [['html', { open: 'never' }]], + reporter: [ + ['github'], + ['list'], + ['html', { open: 'never' }], + ], /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ -}; - -export default config; +}); diff --git a/cli-tests/support/pmm-bin.setup.ts b/cli-tests/support/pmm-bin.setup.ts new file mode 100644 index 0000000000..69d0f84991 --- /dev/null +++ b/cli-tests/support/pmm-bin.setup.ts @@ -0,0 +1,28 @@ +import { test as setup } from '@playwright/test'; +import * as cli from '@helpers/cliHelper'; + +const oldImage = 'percona/pmm-server:2.32.0'; +const newImage = 'percona/pmm-server:2.33.0'; + +setup.describe.configure({ mode: 'parallel' }); + +/** + * Extension point un-hardcode versions using environment variables + */ +setup('Set default env.VARs', async () => { + // TODO: add detection of latest released and RC versions and previous release: + // convert bash into api call with JS object parsing instead of jq + // const release_latest = (await cli.exec('wget -q https://registry.hub.docker.com/v2/repositories/percona/pmm-server/tags -O - | jq -r .results[].name | grep -v latest | sort -V | tail -n1')) + // .stdout; + // rc_latest=$(wget -q "https://registry.hub.docker.com/v2/repositories/perconalab/pmm-server/tags?page_size=25&name=rc" -O - | 
jq -r .results[].name | grep 2.*.*-rc$ | sort -V | tail -n1) + // rc_minor=$(echo $rc_latest | awk -F. '{print $2}') + // dev_latest="2.$((++rc_minor)).0" + await setup.step('Set pmm-server versions', async () => { + process.env.server_image = newImage; + process.env.old_server_image = oldImage; + }); + + // Download main images to reduce threads of download progress logs + await cli.exec(`docker pull ${oldImage} -q`); + await cli.exec(`docker pull ${newImage} -q`); +}); diff --git a/cli-tests/support/types/Output.ts b/cli-tests/support/types/Output.ts new file mode 100644 index 0000000000..295ebab582 --- /dev/null +++ b/cli-tests/support/types/Output.ts @@ -0,0 +1,102 @@ +import { test, expect } from '@playwright/test'; +import { DateTime } from 'luxon'; + +class Output { + command: string; + code: number; + stdout: string; + stderr: string; + + constructor(command: string, exitCode: number, stdOut: string, stdErr: string) { + this.command = command; + this.code = exitCode; + this.stdout = stdOut; + this.stderr = stdErr; + } + + getStdOutLines(): string[] { + return this.stdout.trim().split('\n').filter((item) => item.trim().length > 0); + } + + getStdErrLines(): string[] { + return this.stderr.trim().split('\n').filter((item) => item.trim().length > 0); + } + + /** + * Encapsulates composition of auto generated container name. 
+ * The name is based on container start date and time, + * which is parsed from specified output lines produced by 'pmm server docker upgrade --json' + * Designed to parse: {@code (await cli.exec('pmm server docker upgrade --json')).getStdErrLines();} + * + * Example: + * > logs line: {"level":"info","msg":"Starting PMM Server","time":"2023-05-04T12:47:49-04:00"} + * > returns: 'pmm-server-2023-05-04-12-47-49' + * + * @param logs shell logs lines array {@link Output#getStdErrLines()} + * @param prefix name prefix to generate format: 'prefix-YYYY-MM-DD-HH-MM-SS' + * @return container name {@code string} in format: 'pmm-server-YYYY-MM-DD-HH-MM-SS' + */ + generateContainerNameFromLogs(prefix = 'pmm-server') { + const foundLine = this.getStdErrLines().find((item) => item.includes('"Starting PMM Server","time":')); + expect(foundLine, 'Output logs must be "json lines" and have "Starting PMM Server" with "time"').not.toBeUndefined(); + type LogLine = { level: string, msg: string, time: string }; + const startDateTime: string = (JSON.parse(foundLine.trim()) as LogLine).time; + return `${prefix}-${DateTime.fromISO(startDateTime).toFormat('yyyy-MM-dd-HH-mm-ss')}`; + } + + async assertSuccess() { + await test.step(`Verify "${this.command}" command executed successfully`, async () => { + expect(this.code, `"${this.command}" expected to exit with 0!\nStdout: ${this.stdout}\nStderr: "${this.stderr}"`).toEqual(0); + }); + } + + async exitCodeEquals(expectedValue: number) { + await test.step(`Verify "${this.command}" command exit code is ${expectedValue}`, async () => { + expect(this.code, `"${this.command}" expected to exit with ${expectedValue}! 
Output: "${this.stdout}"`).toEqual(expectedValue); + }); + } + + async outContains(expectedValue: string) { + await test.step(`Verify command output contains ${expectedValue}`, async () => { + expect(this.stdout, `Stdout should contain ${expectedValue}!`).toContain(expectedValue); + }); + } + + async outContainsMany(expectedValues: string[]) { + for (const val of expectedValues) { + await test.step(`Verify command output contains ${val}`, async () => { + expect.soft(this.stdout, `Stdout should contain '${val}'`).toContain(val); + }); + } + expect( + test.info().errors, + `'Contains all elements' failed with ${test.info().errors.length} error(s):\n${this.getErrors()}`, + ).toHaveLength(0); + } + + async outHasLine(expectedValue: string) { + await test.step(`Verify command output has line: '${expectedValue}'`, async () => { + expect(this.getStdOutLines(), `Stdout must have line: '${expectedValue}'`).toContainEqual(expectedValue); + }); + } + + async errContainsMany(expectedValues: string[]) { + for (const val of expectedValues) { + expect.soft(this.stderr, `Stderr should contain '${val}'`).toContain(val); + } + expect( + test.info().errors, + `'Contains all elements' failed with ${test.info().errors.length} error(s):\n${this.getErrors()}`, + ).toHaveLength(0); + } + + private getErrors(): string { + const errors: string[] = []; + for (const obj of test.info().errors) { + errors.push(`\t${obj.message.split('\n')[0]}`); + } + return errors.join('\n'); + } +} + +export default Output; diff --git a/cli-tests/support/types/PmmRestClient.ts b/cli-tests/support/types/PmmRestClient.ts new file mode 100644 index 0000000000..b965a1a021 --- /dev/null +++ b/cli-tests/support/types/PmmRestClient.ts @@ -0,0 +1,48 @@ +import { APIRequestContext, request } from '@playwright/test'; +import { APIResponse } from 'playwright'; + +const encodeAuth = (username: string, password: string) => { + return Buffer.from(`${username}:${password}`).toString( + 'base64', + ); +}; + +/** + * Api 
Client with implemented HTTP(S) requests methods. + */ +class PmmRestClient { + username: string; + password: string; + baseURL: string; + + constructor(username: string, password: string, port = 80, protocol = 'http') { + this.username = username; + this.password = password; + this.baseURL = `${protocol}://localhost:${port}`; + } + + async context(): Promise { + return request.newContext({ + baseURL: this.baseURL, + extraHTTPHeaders: { + Authorization: `Basic ${encodeAuth(this.username, this.password)}`, + }, + ignoreHTTPSErrors: true, + }); + } + + /** + * Implements HTTP(S) POST to PMM Server API + * + * @param path API endpoint path + * @param payload request body {@code Object} + * @return Promise instance + */ + async post(path: string, payload: unknown = {}): Promise { + console.log(`POST: ${this.baseURL}${path}\nPayload: ${JSON.stringify(payload)}`); + const response = await (await this.context()).post(path, payload); + console.log(`Status: ${response.status()} ${response.statusText()}`); + return response; + } +} +export default PmmRestClient; diff --git a/cli-tests/support/types/output.ts b/cli-tests/support/types/output.ts deleted file mode 100644 index 0d578e6010..0000000000 --- a/cli-tests/support/types/output.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { test, expect } from '@playwright/test'; - -class Output { - command: string; - code: number; - stdout: string; - stderr: string; - - constructor(command: string, exitCode: number, stdOut: string, stdErr: string) { - this.command = command; - this.code = exitCode; - this.stdout = stdOut; - this.stderr = stdErr; - } - - async assertSuccess() { - await test.step(`Verify "${this.command}" command executed successfully`, async () => { - expect(this.code, `"${this.command}" expected to exit with 0!\nStdout: ${this.stdout}\nStderr: "${this.stderr}"`).toEqual(0); - }); - } - - async containsMany(expectedValues: string[]) { - await test.step(`Verify "${this.command}" command output`, async () => { - for (const 
val of expectedValues) { - await test.step(`Verify command output contains ${val}`, async () => { - expect(this.stdout).toContain(val); - }); - } - }); - } -} - -export default Output; diff --git a/cli-tests/support/types/request.ts b/cli-tests/support/types/request.ts deleted file mode 100644 index 6a923293b4..0000000000 --- a/cli-tests/support/types/request.ts +++ /dev/null @@ -1,78 +0,0 @@ -import { APIRequest, expect, request } from '@playwright/test'; -import PromiseRetry from 'promise-retry'; - -type PMMRequest = { - port?: number, - username?: string, - password?: string, - data?: unknown -}; - -const encodeAuth = (username: string, password: string) => { - return Buffer.from(`${username}:${password}`).toString( - 'base64', - ); -}; - -export const pmmRequest = async (path: string, opts?: PMMRequest) => { - const { - password = 'admin', username = 'admin', port = 80, data = {}, - } = opts; - const ctx = await request.newContext({ - extraHTTPHeaders: { - Authorization: `Basic ${encodeAuth(username, password)}`, - }, - }); - - return ctx.post(`http://localhost:${port}${path}`, { data }); -}; - -class PMMRestClient { - username: string; - password: string; - port: number; - requestOpts: Parameters[0]; - - constructor(username: string, password: string, port = 80, requestOpts: Parameters[0] = {}) { - this.username = username; - this.password = password; - this.port = port; - this.requestOpts = requestOpts - } - - async context() { - return request.newContext({ - baseURL: `http://localhost:${this.port}`, - extraHTTPHeaders: { - Authorization: `Basic ${encodeAuth(this.username, this.password)}`, - }, - ...this.requestOpts, - }); - } - - async doPost(path: string, data: unknown = {}) { - const ctx = await this.context(); - - return ctx.post(path, { data }); - } - - async works() { - await PromiseRetry(async retry => { - const resp = await this.doPost('/v1/Settings/Get').catch(err => retry(err)) - const respBody = await resp.json().catch(err => retry(err)) - - try { 
- expect(resp.ok()).toBeTruthy() - expect(respBody).toHaveProperty('settings') - } catch(err) { - return retry(err) - } - }, { - retries: 30, - minTimeout: 1000, - maxTimeout: 1000, - }) - } -} - -export default PMMRestClient; diff --git a/cli-tests/tests/mongodb.spec.ts b/cli-tests/tests/mongodb.spec.ts deleted file mode 100644 index d8e79ba70a..0000000000 --- a/cli-tests/tests/mongodb.spec.ts +++ /dev/null @@ -1,102 +0,0 @@ -import { test, expect } from '@playwright/test'; -// import cli = require('@helpers/cliHelper'); //optional way to import with local name -import * as cli from '@helpers/cliHelper'; - -test.describe('Spec file for MongoDB CLI tests ', async () => { - test('pmm-admin mongodb --help check for socket @cli @mongo', async ({}) => { - const output = await cli.exec('pmm-admin add mongodb --help'); - /* - echo "$output" - [ "$status" -eq 0 ] - [[ ${lines[0]} =~ "Usage: pmm-admin add mongodb [ [
]]" ]] - echo "${output}" | grep -- "--socket=STRING" -*/ - await test.step('Verify "--socket=STRING" is present', async () => { - await output.assertSuccess(); - await expect(output.stdout).toContain('Usage: pmm-admin add mongodb [ [
]]'); - await expect(output.stdout).toContain('--socket=STRING'); - }); - }); - - test('run pmm-admin add mongodb --help to check metrics-mode="auto" @mongo', async ({}) => { - const output = await cli.exec('pmm-admin add mongodb --help'); - /* - echo "$output" - [ "$status" -eq 0 ] - echo "${output}" | grep "metrics-mode=\"auto\"" -*/ - await test.step('Verify metrics-mode="auto" is present', async () => { - await output.assertSuccess(); - await expect(output.stdout).toContain('Usage: pmm-admin add mongodb [ [
]]'); - await expect(output.stdout).toContain('metrics-mode="auto"'); - }); - }); - - test('run pmm-admin add mongodb --help to check host @mongo', async ({}) => { - const output = await cli.exec('pmm-admin add mongodb --help'); - /* - echo "$output" - [ "$status" -eq 0 ] - echo "${output}" | grep "host" -*/ - await test.step('Verify "metrics-mode="auto" is present', async () => { - await output.assertSuccess(); - await expect(output.stdout).toContain('Usage: pmm-admin add mongodb [ [
]]'); - await expect(output.stdout).toContain('host'); - }); - }); - - test('run pmm-admin add mongodb --help to check port @mongo', async ({}) => { - const output = await cli.exec('pmm-admin add mongodb --help'); - /* - echo "$output" - [ "$status" -eq 0 ] - echo "${output}" | grep "port" -*/ - await test.step('Verify "port" is present', async () => { - await output.assertSuccess(); - await expect(output.stdout).toContain('Usage: pmm-admin add mongodb [ [
]]'); - await expect(output.stdout).toContain('--socket=STRING'); - }); - }); - - test('run pmm-admin add mongodb --help to check service-name @mongo', async ({}) => { - const output = await cli.exec('pmm-admin add mongodb --help'); - /* - echo "$output" - [ "$status" -eq 0 ] - echo "${output}" | grep "service-name" -*/ - await test.step('Verify "service-name" is present', async () => { - await output.assertSuccess(); - await expect(output.stdout).toContain('Usage: pmm-admin add mongodb [ [
]]'); - await expect(output.stdout).toContain('service-name'); - }); - }); - - test('@PMM-T925 - Verify help for pmm-admin add mongodb has TLS-related flags @mongo', async ({}) => { - const output = await cli.exec('pmm-admin add mongodb --help'); - - /* - echo "$output" - [ "$status" -eq 0 ] - echo "${output}" | grep "tls Use TLS to connect to the database" - echo "${output}" | grep "tls-skip-verify Skip TLS certificates validation" - echo "${output}" | grep "tls-certificate-key-file=STRING" - echo "${output}" | grep "tls-certificate-key-file-password=STRING" - echo "${output}" | grep "tls-ca-file=STRING Path to certificate authority file" - echo "${output}" | grep "authentication-mechanism=STRING" - echo "${output}" | grep "authentication-database=STRING" -*/ - await output.assertSuccess(); - await output.containsMany([ - 'tls Use TLS to connect to the database', - 'tls-skip-verify Skip TLS certificates validation', - 'tls-certificate-key-file=STRING', - 'tls-certificate-key-file-password=STRING', - 'tls-ca-file=STRING Path to certificate authority file', - 'authentication-mechanism=STRING', - 'authentication-database=STRING', - ]); - }); -}); diff --git a/cli-tests/tests/pmm-cli/pmm.spec.ts b/cli-tests/tests/pmm-cli/pmm.spec.ts index c1f51cffe8..75a880a8ce 100644 --- a/cli-tests/tests/pmm-cli/pmm.spec.ts +++ b/cli-tests/tests/pmm-cli/pmm.spec.ts @@ -1,9 +1,9 @@ -import { test, expect } from '@playwright/test' -import * as cli from '@helpers/cliHelper' +import { test, expect } from '@playwright/test'; +import * as cli from '@helpers/cliHelper'; test.describe('PMM binary tests @pmm-cli', async () => { test('--version', async ({}) => { - const output = await cli.exec('pmm --version') - await output.assertSuccess() - }) -}) + const output = await cli.exec('pmm --version'); + await output.assertSuccess(); + }); +}); diff --git a/cli-tests/tests/pmm-cli/server/docker-specific.spec.ts b/cli-tests/tests/pmm-cli/server/docker-specific.spec.ts new file mode 100644 index 
0000000000..6ec303c0af --- /dev/null +++ b/cli-tests/tests/pmm-cli/server/docker-specific.spec.ts @@ -0,0 +1,44 @@ +import { expect, test } from '@playwright/test'; +import * as cli from '@helpers/cliHelper'; + +/** + * Test are chained in the order to keep docker working after both CI/CD and local runs: + * 1. Uninstall docker from OS -> check error + * 2. Docker installed by pmm bin + * 3. Remove $USER's privileges to use docker -> check error + * 4. Restore $USER's privileges + */ +test.describe('PMM Server Install Docker specific tests', async () => { + const expectedErrorMessage = 'DockerNoAccess: docker is either not running or this user has no access to Docker. Try running as root'; + + test.afterAll(async () => { + const output = await cli.exec('sudo usermod -a -G docker $USER'); + }); + + test.skip('PMM-T1615 "pmm server docker install" flag --skip-docker-install is respected @pmm-cli', async ({ }) => { + // Remove docker for test + // dpkg -l | grep -i docker + // sudo apt-get purge -y docker-engine docker docker.io docker-ce docker-ce-cli docker-compose-plugin || true + // sudo apt-get autoremove -y --purge docker-engine docker docker.io docker-ce docker-compose-plugin + const output = await cli.exec('pmm server docker install --json --skip-docker-install'); + await output.exitCodeEquals(1); + await output.outContains(expectedErrorMessage); + }); + + test.skip('PMM-T1569 "pmm server docker install" installs server when docker not installed @pmm-cli', async ({ }) => { + // remove docker and run pmm 'pmm server docker install' + const output = await cli.exec('pmm server docker install --json'); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + }); + + test.skip('PMM-T1574 "pmm server docker install" displays error if user does not have privileges to use docker @pmm-cli', async ({ }) => { + // change privileges for test + await (await cli.exec('sudo gpasswd -d $USER 
docker')).assertSuccess(); + await (await cli.exec('exec newgrp $USER')).assertSuccess(); + + const output = await cli.exec('pmm server docker install --skip-docker-install'); + await output.exitCodeEquals(1); + await output.outContains(expectedErrorMessage); + }); +}); diff --git a/cli-tests/tests/pmm-cli/server/install.spec.ts b/cli-tests/tests/pmm-cli/server/install.spec.ts index e2e159ac5c..05b11c8b26 100644 --- a/cli-tests/tests/pmm-cli/server/install.spec.ts +++ b/cli-tests/tests/pmm-cli/server/install.spec.ts @@ -1,61 +1,161 @@ -import { test, expect } from '@playwright/test' -import * as cli from '@helpers/cliHelper' -import PMMRestClient from '@tests/support/types/request' -import { teardown } from '@tests/helpers/containers' - -test.describe.configure({ mode: 'parallel' }) - -test.describe('Install PMM Server', async () => { - test('shall respect relevant flags', async ({ }) => { - const adminPw = 'admin123' - const containerName = 'pmm-server-install-test' - const imageName = 'percona/pmm-server:2.32.0' - const volumeName = 'pmm-data-install-test' - - try { - let output = await cli.exec(` - pmm server docker install - --json - --admin-password=${adminPw} - --docker-image="${imageName}" - --https-listen-port=1443 - --http-listen-port=1080 - --container-name=${containerName} - --volume-name=${volumeName}` - ) - - // Output - await output.assertSuccess() - expect(output.stderr).toContain('Starting PMM Server') - - // http client - let client = new PMMRestClient('admin', adminPw, 1080) - await client.works() - - // https client - client = new PMMRestClient('admin', adminPw, 1443, { - baseURL: 'https://localhost:1443', - ignoreHTTPSErrors: true, - }) - let resp = await client.doPost('/v1/Settings/Get') - let respBody = await resp.json() - - expect(resp.ok()).toBeTruthy() - expect(respBody).toHaveProperty('settings') - - // Container name - output = await cli.exec(`docker ps --format="{{.Names}}" | grep "^${containerName}$" | wc -l`) - 
expect(output.stdout.trim()).toEqual('1') - - // Volume name - output = await cli.exec(`docker volume ls --format="{{.Name}}" | grep "^${volumeName}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - // Docker image - output = await cli.exec(`docker ps --format="{{.Names}} {{.Image}}" | grep -E "^${containerName} ${imageName}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - } finally { - await teardown([`^${containerName}$`], [volumeName]) - } - }) -}) +import { test, expect } from '@playwright/test'; +import * as cli from '@helpers/cliHelper'; +import { verifyPmmServerProperties } from '@helpers/customAssertions'; + +test.describe.configure({ mode: 'parallel' }); + +test.describe('PMM Server Install tests', async () => { + const defaultAdminPassword = 'admin'; + const defaultServImage = 'percona/pmm-server:2'; + const defaultContainerName = 'pmm-server'; + const defaultVolumeName = 'pmm-data'; + const adminPassword = 'admin123'; + + test('PMM-T1570 "pmm server docker install" works with no flags @pmm-cli', async ({ }) => { + const output = await cli.exec('pmm server docker install'); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain \'Starting PMM Server\'').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName: defaultContainerName, + imageName: defaultServImage, + volumeName: defaultVolumeName, + httpPort: 80, + httpsPort: 443, + adminPassword: defaultAdminPassword, + }); + }); + + test('PMM-T1610 "pmm server docker install" respects relevant flags @pmm-cli', async ({ }) => { + const containerName = 'pmm-server-install-test'; + const volumeName = 'pmm-data-install-test'; + // TODO: add getHttpPort() getHttpsPort() methods to remove manual attention. 
+ const httpPort = 1080; + const httpsPort = 1443; + + const output = await cli.exec(` + pmm server docker install + --admin-password=${adminPassword} + --docker-image="${process.env.server_image}" + --https-listen-port=${httpsPort} + --http-listen-port=${httpPort} + --container-name=${containerName} + --volume-name=${volumeName}`); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName, + imageName: process.env.server_image, + volumeName, + httpPort, + httpsPort, + adminPassword, + }); + }); + + test('PMM-T1660 "pmm server docker install" shows error for short password @pmm-cli', async ({ }) => { + const containerName = 'pmm-server-short-pass'; + const volumeName = 'pmm-data-short-pass'; + const httpPort = 1081; + const httpsPort = 1444; + + const output = await cli.exec(` + pmm server docker install + --admin-password="test" + --https-listen-port=${httpsPort} + --http-listen-port=${httpPort} + --container-name=${containerName} + --volume-name=${volumeName}`); + await output.assertSuccess(); + await output.outContains('Error: ✗ new password is too short'); + await output.errContainsMany([ + 'Starting PMM Server', + 'Password change exit code: 1', + 'Password change failed. 
Use the default password "admin"', + ]); + + await verifyPmmServerProperties({ + httpPort, + httpsPort, + adminPassword: defaultAdminPassword, + }); + }); + + test('PMM-T1575 "pmm server docker install" exits if volume already exists @pmm-cli', async ({ }) => { + const volumeName = 'pmm-data-exists'; + await (await cli.exec(`docker volume create ${volumeName}`)).assertSuccess(); + const output = await cli.exec(` + pmm server docker install + --volume-name=${volumeName}`); + await output.exitCodeEquals(1); + await output.outContains(`VolumeExists: docker volume with name "${volumeName}" already exists`); + }); + + test('PMM-T1576 "pmm server docker install" exits if docker container is already present @pmm-cli', async ({ }) => { + const containerName = 'pmm-server-exists'; + const httpsPort = 1445; + const httpPort = 1082; + await (await cli.exec(` + sudo pmm server docker install + --https-listen-port=${httpsPort} + --http-listen-port=${httpPort} + --container-name=${containerName} + --volume-name=pmm-data-123`)).assertSuccess(); + const output = await cli.exec(` + pmm server docker install + --container-name=${containerName} + --volume-name=pmm-data-124`); + await output.exitCodeEquals(1); + await output.outContains(`Error response from daemon: Conflict. The container name "/${containerName}" is already in use by container`); + }); + + // TODO: PMM-T1616 scenario requires a review. Why this flag is actually needed? 
+ test('PMM-T1616 "pmm server docker install" flag --skip-change-password is respected @pmm-cli', async ({ }) => { + const containerName = 'pmm-server-skip-pass'; + const volumeName = 'pmm-data-skip-pass'; + const httpsPort = 1446; + const httpPort = 1888; + const output = await cli.exec(` + pmm server docker install + --skip-change-password + --https-listen-port=${httpsPort} + --http-listen-port=${httpPort} + --container-name=${containerName} + --volume-name=${volumeName}`); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + expect(output.stdout, 'Stdout should not contain "Changing password"!').not.toContain('Changing password'); + + await verifyPmmServerProperties({ + httpPort, + httpsPort, + adminPassword: defaultAdminPassword, + }); + }); + + test('PMM-T1616 "pmm server docker install" flag --skip-change-password is respected with present password change flag' + + ' @pmm-cli', async ({ }) => { + const containerName = 'pmm-server-skip-flag'; + const volumeName = 'pmm-data-skip-flag'; + const httpsPort = 1447; + const httpPort = 1889; + const output = await cli.exec(` + pmm server docker install + --skip-change-password + --admin-password=${adminPassword} + --https-listen-port=${httpsPort} + --http-listen-port=${httpPort} + --container-name=${containerName} + --volume-name=${volumeName}`); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + expect(output.stdout, 'Stdout should not contain "Changing password"!').not.toContain('Changing password'); + + await verifyPmmServerProperties({ + httpPort, + httpsPort, + adminPassword: defaultAdminPassword, + }); + }); +}); diff --git a/cli-tests/tests/pmm-cli/server/not-parallel.spec.ts b/cli-tests/tests/pmm-cli/server/not-parallel.spec.ts deleted file mode 100644 index 5104677323..0000000000 --- a/cli-tests/tests/pmm-cli/server/not-parallel.spec.ts 
+++ /dev/null @@ -1,48 +0,0 @@ -import { test, expect } from '@playwright/test' -import * as cli from '@helpers/cliHelper' -import PMMRestClient from '@tests/support/types/request' -import { teardown } from '@tests/helpers/containers' - -test.describe('Install PMM Server - not parallel', async () => { - test('shall install with no flags', async ({ }) => { - try { - const output = await cli.exec(`pmm server docker install --json`) - await output.assertSuccess() - expect(output.stderr).toContain('Starting PMM Server') - - // http client - const client = new PMMRestClient('admin', 'admin') - await client.works() - } finally { - await teardown(['^pmm-server$'], ['pmm-data']) - } - }) -}) - -test.describe('Upgrade PMM Server - not parallel', async () => { - test('shall upgrade with no flags', async () => { - const oldImage = 'percona/pmm-server:2.32.0' - - try { - let output = await cli.exec(` - pmm server docker install - --json - --docker-image="${oldImage}"` - ) - await output.assertSuccess() - - output = await cli.exec(`pmm server docker upgrade --json`) - await output.assertSuccess() - expect(output.stderr).toContain('Starting PMM Server') - - // Docker image - output = await cli.exec(`docker ps --format="{{.Image}}" | grep "^percona/pmm-server:2$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - const client = new PMMRestClient('admin', 'admin') - await client.works() - } finally { - await teardown(['^pmm-server$', '^pmm-server-[-0-9]+$'], ['pmm-data']) - } - }) -}) diff --git a/cli-tests/tests/pmm-cli/server/upgrade.spec.ts b/cli-tests/tests/pmm-cli/server/upgrade.spec.ts index 3a0af17c2a..05046aefd9 100644 --- a/cli-tests/tests/pmm-cli/server/upgrade.spec.ts +++ b/cli-tests/tests/pmm-cli/server/upgrade.spec.ts @@ -1,104 +1,214 @@ -import { test, expect } from '@playwright/test' -import * as cli from '@helpers/cliHelper' -import PMMRestClient from '@tests/support/types/request' -import { teardown } from '@tests/helpers/containers' - 
-test.describe.configure({ mode: 'parallel' }) - -test.describe('Upgrade PMM Server', async () => { - test('shall respect relevant flags', async () => { - const oldContainerName = 'pmm-server-upgrade-1' - const newContainerName = 'pmm-server-upgrade-1-new' - const oldImage = 'percona/pmm-server:2.32.0' - const newImage = 'percona/pmm-server:2.33.0' - const volumeName = 'pmm-data-upgrade-1' - - try { - let output = await cli.exec(` - pmm server docker install - --json - --docker-image="${oldImage}" - --http-listen-port=3080 - --https-listen-port=3443 - --container-name=${oldContainerName} - --volume-name=${volumeName}` - ) - await output.assertSuccess() - - output = await cli.exec(` - pmm server docker upgrade - --json - --docker-image="${newImage}" - --container-id=${oldContainerName} - --new-container-name=${newContainerName}` - ) - await output.assertSuccess() - expect(output.stderr).toContain('Starting PMM Server') - - // Container name - output = await cli.exec(`docker ps --format="{{.Names}}" | grep "^${newContainerName}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - // Volume name - output = await cli.exec(`docker volume ls --format="{{.Name}}" | grep "^${volumeName}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - // Docker image - output = await cli.exec(`docker ps --format="{{.Names}} {{.Image}}" | grep -E "^${newContainerName} ${newImage}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - const client = new PMMRestClient('admin', 'admin', 3080) - await client.works() - } finally { - await teardown(['^pmm-server-upgrade-1'], [`^${volumeName}`]) - } - }) - - test('shall respect container name prefix', async () => { - const oldContainerName = 'pmm-server-upgrade-2' - const newContainerName = 'pmm-server-upgrade-2-new' - const oldImage = 'percona/pmm-server:2.32.0' - const newImage = 'percona/pmm-server:2.33.0' - const volumeName = 'pmm-data-upgrade-2' - - try { - let output = await cli.exec(` - pmm server docker install - 
--json - --docker-image="${oldImage}" - --http-listen-port=4080 - --https-listen-port=4443 - --container-name=${oldContainerName} - --volume-name=${volumeName}` - ) - await output.assertSuccess() - - output = await cli.exec(` - pmm server docker upgrade - --json - --docker-image="${newImage}" - --container-id=${oldContainerName} - --new-container-name-prefix=${newContainerName}` - ) - await output.assertSuccess() - expect(output.stderr).toContain('Starting PMM Server') - - // Container name - output = await cli.exec(`docker ps --format="{{.Names}}" | grep -E "^${newContainerName}.+$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - // Volume name - output = await cli.exec(`docker volume ls --format="{{.Name}}" | grep "^${volumeName}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - // Docker image - output = await cli.exec(`docker ps --format="{{.Names}} {{.Image}}" | grep -E "^${newContainerName}.+ ${newImage}$" | wc -l`) - expect(output.stdout.trim()).toEqual('1') - - const client = new PMMRestClient('admin', 'admin', 4080) - await client.works() - } finally { - await teardown(['^pmm-server-upgrade-2'], [`^${volumeName}`]) - } - }) -}) +import { expect, test } from '@playwright/test'; +import * as cli from '@helpers/cliHelper'; +import { verifyPmmServerProperties } from '@helpers/customAssertions'; +import { runOldPmmServer } from '@helpers/containers'; + +const defaultAdminPassword = 'admin'; +const defaultServImage = 'percona/pmm-server:2'; +const defaultVolumeName = 'pmm-data'; + +test.describe.configure({ mode: 'parallel' }); + +test.describe('pmm-bin: server upgrade tests', async () => { + const adminPassword = 'admin123'; + + test('"pmm server docker upgrade" works with no flags', async () => { + let output = await cli.exec(` + pmm server docker install + --json + --docker-image="${process.env.old_server_image}"`); + await output.assertSuccess(); + + output = await cli.exec('pmm server docker upgrade --json'); + await 
output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName: output.generateContainerNameFromLogs(), + imageName: defaultServImage, + volumeName: defaultVolumeName, + httpPort: 80, + httpsPort: 443, + adminPassword: defaultAdminPassword, + }); + }); + + test('"pmm server docker upgrade" respects relevant flags', async () => { + const oldContainerName = 'pmm-server-upgrade-1'; + const newContainerName = 'pmm-server-upgrade-1-new'; + const volumeName = 'pmm-data-upgrade-1'; + const httpPort = 3080; + const httpsPort = 3443; + + await (await cli.exec(` + pmm server docker install + --json + --admin-password=${adminPassword} + --docker-image="${process.env.old_server_image}" + --http-listen-port=${httpPort} + --https-listen-port=${httpsPort} + --container-name=${oldContainerName} + --volume-name=${volumeName}`)).assertSuccess(); + + const output = await cli.exec(` + pmm server docker upgrade + --json + --docker-image="${process.env.server_image}" + --container-id=${oldContainerName} + --new-container-name=${newContainerName}`); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName: newContainerName, + imageName: process.env.server_image, + volumeName, + httpPort, + httpsPort, + adminPassword, + }); + }); + + test('"pmm server docker upgrade" respects container name prefix', async () => { + const oldContainerName = 'pmm-server-upgrade-2'; + const newContainerPrefix = 'pmm-server-upg'; + const volumeName = 'pmm-data-upgrade-2'; + const httpPort = 4080; + const httpsPort = 4443; + + await (await cli.exec(` + pmm server docker install + --json + --docker-image="${process.env.old_server_image}" + --http-listen-port=${httpPort} + --https-listen-port=${httpsPort} + --container-name=${oldContainerName} + 
--volume-name=${volumeName}`)).assertSuccess(); + + const output = await cli.exec(` + pmm server docker upgrade + --json + --docker-image="${process.env.server_image}" + --container-id=${oldContainerName} + --new-container-name-prefix=${newContainerPrefix}`); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName: output.generateContainerNameFromLogs(newContainerPrefix), + imageName: process.env.server_image, + volumeName, + httpPort, + httpsPort, + adminPassword: defaultAdminPassword, + }); + }); + + test('PMM-T1680 "pmm server docker upgrade" upgrades non-CLI containers', async () => { + const oldContainerName = 'pmm-server-non-cli'; + const volumeName = 'pmm-data-non-cli'; + const httpPort = 4079; + const httpsPort = 4444; + await runOldPmmServer(httpPort, httpsPort, volumeName, oldContainerName); + + await cli.execSilent('sleep 1'); // to avoid same name + const output = await cli.exec(` + pmm server docker upgrade -y + --json + --container-id=${oldContainerName}`); + await output.assertSuccess(); + + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName: output.generateContainerNameFromLogs(), + imageName: defaultServImage, + volumeName, + httpPort, + httpsPort, + adminPassword: defaultAdminPassword, + }); + }); + + test('PMM-T1682 "pmm server docker upgrade" gives warning for non-CLI containers', async () => { + const oldContainerName = 'pmm-server-non-cli-warn'; + const newContainerPrefix = 'pmm-server-warn'; + const volumeName = 'pmm-data-non-cli-warn'; + const httpPort = 4078; + const httpsPort = 4445; + await runOldPmmServer(httpPort, httpsPort, volumeName, oldContainerName); + + const output = await cli.exec(` + pmm server docker upgrade -y + --new-container-name-prefix=${newContainerPrefix} + 
--container-id=${oldContainerName}`); + await output.assertSuccess(); + + await output.outContainsMany([ + `PMM Server in the container "${oldContainerName}" was not installed via pmm cli.`, + 'We will attempt to upgrade the container and perform the following actions:', + `- Stop the container "${oldContainerName}"`, + `- Back up all volumes in "${oldContainerName}"`, + `- Mount all volumes from "${oldContainerName}" in the new container`, + `- Share the same network ports as in "${oldContainerName}"`, + `The container "${oldContainerName}" will NOT be removed. You can remove it manually later, if needed.`, + ]); + }); + + test('PMM-T1685 "pmm server docker upgrade" flags are respected for non-CLI containers', async () => { + const oldContainerName = 'pmm-server-non-cli-flags'; + const newContainerName = 'pmm-server-non-cli-flags-new'; + const volumeName = 'pmm-data-non-cli-flags'; + const httpPort = 4081; + const httpsPort = 4446; + await runOldPmmServer(httpPort, httpsPort, volumeName, oldContainerName); + + const output = await cli.exec(` + pmm server docker upgrade -y + --json + --docker-image=${process.env.server_image} + --new-container-name=${newContainerName} + --container-id=${oldContainerName}`); + await output.assertSuccess(); + expect(output.stderr, 'stderr should contain "Starting PMM Server"').toContain('Starting PMM Server'); + + await verifyPmmServerProperties({ + containerName: newContainerName, + imageName: process.env.server_image, + volumeName, + httpPort, + httpsPort, + adminPassword: defaultAdminPassword, + }); + }); + + test('PMM-T1687 "pmm server docker upgrade" respects container name prefix for non-CLI containers', async () => { + const oldContainerName = 'pmm-server-non-cli-prefix'; + const newContainerPrefix = 'pmm-server-prefix'; + const volumeName = 'pmm-data-non-cli-prefix'; + const httpPort = 4083; + const httpsPort = 4447; + await runOldPmmServer(httpPort, httpsPort, volumeName, oldContainerName); + + const output = await 
cli.exec(` + pmm server docker upgrade -y + --json + --new-container-name-prefix=${newContainerPrefix} + --container-id=${oldContainerName}`); + await output.assertSuccess(); + // TODO: include json format warning verification after PMM-12035 is done + // await output.outContainsMany([ + // `PMM Server in the container "${oldContainerName}" was not installed via pmm cli.`, + // 'We will attempt to upgrade the container and perform the following actions:', + // `- Stop the container "${oldContainerName}"`, + // `- Back up all volumes in "${oldContainerName}"`, + // `- Mount all volumes from "${oldContainerName}" in the new container`, + // `- Share the same network ports as in "${oldContainerName}"`, + // `The container "${oldContainerName}" will NOT be removed. You can remove it manually later, if needed.`, + // ]); + await verifyPmmServerProperties({ + containerName: output.generateContainerNameFromLogs(newContainerPrefix), + imageName: defaultServImage, + }); + }); +}); diff --git a/docker-compose.yml b/docker-compose.yml index fee48fea1e..a661082e6d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -86,7 +86,7 @@ services: - ${PMM_PORT_CH_HTTP:-11123}:8123 volumes: - ./:/root/go/src/github.com/percona/pmm - - ./Makefile.devcontainer:/root/go/src/github.com/percona/pmm/Makefile:ro # change Makefile in devcontainer + - ./Makefile.devcontainer:/root/go/src/github.com/percona/pmm/Makefile:ro # substitute Makefile in devcontainer # caching - go-modules:/root/go/pkg/mod - root-cache:/root/.cache diff --git a/go.mod b/go.mod index d5a0ca448c..d7cdb556b1 100644 --- a/go.mod +++ b/go.mod @@ -18,32 +18,32 @@ replace github.com/ClickHouse/clickhouse-go/151 => github.com/ClickHouse/clickho require ( github.com/AlekSi/pointer v1.2.0 github.com/ClickHouse/clickhouse-go/151 v0.0.0-00010101000000-000000000000 - github.com/ClickHouse/clickhouse-go/v2 v2.9.0 + github.com/ClickHouse/clickhouse-go/v2 v2.10.0 github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/alecthomas/kong 
v0.7.1 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 - github.com/aws/aws-sdk-go v1.44.245 + github.com/aws/aws-sdk-go v1.44.270 github.com/blang/semver v3.5.1+incompatible github.com/brianvoe/gofakeit/v6 v6.21.0 github.com/charmbracelet/bubbles v0.15.0 - github.com/charmbracelet/bubbletea v0.23.2 + github.com/charmbracelet/bubbletea v0.24.0 github.com/charmbracelet/lipgloss v0.7.1 github.com/davecgh/go-spew v1.1.1 - github.com/docker/docker v20.10.23+incompatible + github.com/docker/docker v23.0.4+incompatible github.com/docker/go-connections v0.4.0 - github.com/envoyproxy/protoc-gen-validate v0.10.1 - github.com/go-co-op/gocron v1.22.2 + github.com/envoyproxy/protoc-gen-validate v1.0.1 + github.com/go-co-op/gocron v1.28.0 github.com/go-openapi/errors v0.20.3 github.com/go-openapi/runtime v0.26.0 github.com/go-openapi/strfmt v0.21.7 github.com/go-openapi/swag v0.22.3 github.com/go-openapi/validate v0.22.1 - github.com/go-sql-driver/mysql v1.7.0 + github.com/go-sql-driver/mysql v1.7.1 github.com/golang-migrate/migrate/v4 v4.15.2 github.com/golang/protobuf v1.5.3 github.com/google/uuid v1.3.0 - github.com/grafana/grafana-api-golang-client v0.19.0 + github.com/grafana/grafana-api-golang-client v0.21.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 @@ -51,8 +51,8 @@ require ( github.com/jmoiron/sqlx v1.3.5 github.com/jotaen/kong-completion v0.0.5 github.com/lib/pq v1.10.6 - github.com/minio/minio-go/v7 v7.0.52 - github.com/operator-framework/api v0.17.4 + github.com/minio/minio-go/v7 v7.0.55 + github.com/operator-framework/api v0.17.5 github.com/operator-framework/operator-lifecycle-manager v0.24.0 github.com/percona-platform/dbaas-api v0.0.0-20230103182808-d79c449a9f4c github.com/percona-platform/saas v0.0.0-20230306173543-c223f9a47342 @@ -65,35 +65,34 @@ require ( 
github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 github.com/prometheus/alertmanager v0.25.0 - github.com/prometheus/client_golang v1.15.0 - github.com/prometheus/common v0.42.0 + github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/common v0.44.0 github.com/ramr/go-reaper v0.2.1 github.com/robfig/cron/v3 v3.0.1 - github.com/sirupsen/logrus v1.9.0 + github.com/sirupsen/logrus v1.9.2 github.com/stretchr/objx v0.5.0 - github.com/stretchr/testify v1.8.2 - go.mongodb.org/mongo-driver v1.11.4 + github.com/stretchr/testify v1.8.3 + go.mongodb.org/mongo-driver v1.11.6 go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd - golang.org/x/crypto v0.6.0 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.7.0 + golang.org/x/crypto v0.9.0 + golang.org/x/sync v0.2.0 + golang.org/x/sys v0.8.0 golang.org/x/text v0.9.0 - golang.org/x/tools v0.8.0 - google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 - google.golang.org/grpc v1.55.0-dev + golang.org/x/tools v0.9.1 + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 + google.golang.org/grpc v1.56.0-dev google.golang.org/protobuf v1.30.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/reform.v1 v1.5.1 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.27.1 + k8s.io/api v0.27.2 k8s.io/apiextensions-apiserver v0.26.2 - k8s.io/apimachinery v0.27.1 - k8s.io/cli-runtime v0.27.1 - k8s.io/client-go v0.27.1 - k8s.io/kubectl v0.27.1 - modernc.org/sqlite v1.21.1 + k8s.io/apimachinery v0.27.2 + k8s.io/cli-runtime v0.27.2 + k8s.io/client-go v0.27.2 + k8s.io/kubectl v0.27.2 + modernc.org/sqlite v1.22.1 sigs.k8s.io/controller-runtime v0.14.6 - vitess.io/vitess v0.15.2 ) require ( @@ -110,12 +109,12 @@ require ( github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic v0.6.9 // indirect 
github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect @@ -124,31 +123,26 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-proto-validators v0.3.2 // indirect - github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 // indirect github.com/percona/percona-backup-mongodb v1.8.1 // indirect - github.com/philhofer/fwd v1.0.0 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/posener/complete v1.2.3 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tinylib/msgp v1.1.1 // indirect - github.com/uber/jaeger-client-go v2.16.0+incompatible // indirect - github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/tidwall/pretty v1.2.0 // indirect github.com/xlab/treeprint v1.1.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect k8s.io/klog/v2 v2.90.1 // indirect - k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect lukechampine.com/uint128 v1.2.0 // indirect 
modernc.org/cc/v3 v3.40.0 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.22.3 // indirect + modernc.org/libc v1.22.5 // indirect modernc.org/mathutil v1.5.0 // indirect modernc.org/memory v1.5.0 // indirect modernc.org/opt v0.1.3 // indirect @@ -162,11 +156,11 @@ require ( ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1 - github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/ClickHouse/ch-go v0.52.1 // indirect github.com/ClickHouse/clickhouse-go v1.5.4 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 @@ -179,8 +173,8 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/charmbracelet/harmonica v0.2.0 // indirect github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect - github.com/containerd/console v1.0.3 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-faster/city v1.0.1 // indirect @@ -209,18 +203,18 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.0 // indirect + github.com/klauspost/compress v1.16.5 // indirect 
github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -238,12 +232,12 @@ require ( github.com/paulmach/orb v0.9.0 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.8.2 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rs/xid v1.4.0 // indirect + github.com/rs/xid v1.5.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect @@ -256,9 +250,9 @@ require ( go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/term v0.7.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect 
google.golang.org/appengine v1.6.7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gotest.tools/v3 v3.3.0 // indirect diff --git a/go.sum b/go.sum index 5724421694..4d0d8d36a0 100644 --- a/go.sum +++ b/go.sum @@ -58,12 +58,12 @@ github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tS github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1 
h1:eoQrCw9DMThzbJ32fHXZtISnURk6r0TozXiWuTsay5s= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1/go.mod h1:21rlzm+SuYrS9ARS92XEGxcHQeLVDcaY2YV30rHjSd4= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= @@ -94,8 +94,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/ch-go v0.52.1 h1:nucdgfD1BDSHjbNaG3VNebonxJzD8fX8jbuBpfo5VY0= @@ -105,8 +105,8 @@ github.com/ClickHouse/clickhouse-go v1.5.1 h1:I8zVFZTz80crCs0FFEBJooIxsPcV0xfthz github.com/ClickHouse/clickhouse-go v1.5.1/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= -github.com/ClickHouse/clickhouse-go/v2 v2.9.0 h1:vh0D+9p5hKWLYqCfU8Hd7/GMXNfHC84isUf7sNlJrZk= -github.com/ClickHouse/clickhouse-go/v2 v2.9.0/go.mod 
h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg= +github.com/ClickHouse/clickhouse-go/v2 v2.10.0 h1:0w/A50D5MfsRUYBaV6rLKwZ4LXWKLZKJ1u31QXjTIO4= +github.com/ClickHouse/clickhouse-go/v2 v2.10.0/go.mod h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -190,8 +190,8 @@ github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M= -github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.270 h1:fRdrwRVO0PpRSks/bNFXSRexA7Zm+k6pvKRpnrpAmeg= +github.com/aws/aws-sdk-go v1.44.270/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= @@ -219,7 +219,6 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21 github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= -github.com/aymanbagabas/go-osc52 v1.2.1/go.mod 
h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= @@ -271,8 +270,8 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/charmbracelet/bubbles v0.15.0 h1:c5vZ3woHV5W2b8YZI1q7v4ZNQaPetfHuoHzx+56Z6TI= github.com/charmbracelet/bubbles v0.15.0/go.mod h1:Y7gSFbBzlMpUDR/XM9MhZI374Q+1p1kluf1uLl8iK74= github.com/charmbracelet/bubbletea v0.23.1/go.mod h1:JAfGK/3/pPKHTnAS8JIE2u9f61BjWTQY57RbT25aMXU= -github.com/charmbracelet/bubbletea v0.23.2 h1:vuUJ9HJ7b/COy4I30e8xDVQ+VRDUEFykIjryPfgsdps= -github.com/charmbracelet/bubbletea v0.23.2/go.mod h1:FaP3WUivcTM0xOKNmhciz60M6I+weYLF76mr1JyI7sM= +github.com/charmbracelet/bubbletea v0.24.0 h1:l8PHrft/GIeikDPCUhQe53AJrDD8xGSn0Agirh8xbe8= +github.com/charmbracelet/bubbletea v0.24.0/go.mod h1:rK3g/2+T8vOSEkNHvtq40umJpeVYDn6bLaqbgzhL/hg= github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= github.com/charmbracelet/lipgloss v0.6.0/go.mod h1:tHh2wr34xcHjC2HCXIlGSG1jaDF0S0atAUvBMP6Ppuk= @@ -331,8 +330,9 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/console 
v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -454,18 +454,19 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8= github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker 
v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= -github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= +github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -500,8 +501,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -529,8 +530,8 @@ github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYis github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-co-op/gocron v1.22.2 h1:5+486wUbSp2Tgodv3Fwek0OgMK/aqjcgGBcRTcT2kgs= -github.com/go-co-op/gocron v1.22.2/go.mod h1:UqVyvM90I1q/R1qGEX6cBORI6WArLuEgYlbncLMvzRM= +github.com/go-co-op/gocron v1.28.0 h1:7TeuQggXS2xq1x76+50sRt5TxT3Tj+aKn0hkah2vpyw= +github.com/go-co-op/gocron v1.28.0/go.mod h1:39f6KNSGVOU1LO/ZOoZfcSxwlsJDQOKSu8erN0SH48Y= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-faster/city v1.0.1 
h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= @@ -612,8 +613,8 @@ github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUri github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -675,8 +676,7 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -794,8 +794,8 @@ github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/grafana-api-golang-client v0.19.0 h1:4z8voB2nv/bMiP4WbCKdhCym2mltSDFUzOrDqzohrw0= -github.com/grafana/grafana-api-golang-client v0.19.0/go.mod h1:24W29gPe9yl0/3A9X624TPkAOR8DpHno490cPwnkv8E= +github.com/grafana/grafana-api-golang-client v0.21.1 h1:39Nqvk5qPBpdrA+uF8sThIGCD9DDbYsJLbOo2WN0g5U= +github.com/grafana/grafana-api-golang-client v0.21.1/go.mod h1:24W29gPe9yl0/3A9X624TPkAOR8DpHno490cPwnkv8E= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -837,6 +837,7 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= @@ -968,10 +969,9 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -983,6 +983,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= @@ -1033,8 +1034,8 @@ 
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -1068,10 +1069,10 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= -github.com/minio/minio-go/v7 v7.0.52 h1:8XhG36F6oKQUDDSuz6dY3rioMzovKjW40W6ANuN0Dps= -github.com/minio/minio-go/v7 v7.0.52/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/minio-go/v7 v7.0.55 h1:ZXqUO/8cgfHzI+08h/zGuTTFpISSA32BZmBE3FCLJas= +github.com/minio/minio-go/v7 v7.0.55/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= 
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1126,7 +1127,6 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/termenv v0.11.1-0.20220204035834-5ac8409525e0/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs= github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc= -github.com/muesli/termenv v0.14.0/go.mod h1:kG/pF1E7fh949Xhe156crRUrHNyK221IuGO7Ez60Uc8= github.com/muesli/termenv v0.15.1 h1:UzuTb/+hhlBugQz28rpzey4ZuKcZ03MeKsoG7IJZIxs= github.com/muesli/termenv v0.15.1/go.mod h1:HeAQPTzpfs016yGtA4g00CsdYnVLJvxsS4ANqrZs2sQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1205,13 +1205,11 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q= -github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/operator-framework/api v0.17.4 h1:ZNTnHaCncZ3IQ9KcYXFkEdHZXsZZCBy/HbWU7R4uAZM= -github.com/operator-framework/api v0.17.4/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= +github.com/operator-framework/api v0.17.5 h1:9d0pc6m1Vp4QeS8i5dhl/B0nifhKQdtw+iFsNx0An0Q= +github.com/operator-framework/api v0.17.5/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= github.com/operator-framework/operator-lifecycle-manager v0.24.0 h1:9LOfvyohGEkNHwcOGOgw+w3ZAnGeT6JVh3CvIbWpnus= github.com/operator-framework/operator-lifecycle-manager v0.24.0/go.mod h1:2zDUxcpW2idTLjRw36WlMetHZ50Nlf1C3JxASPfYS20= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1245,19 +1243,19 @@ github.com/percona/promconfig v0.2.5/go.mod h1:Y2uXi5QNk71+ceJHuI9poank+0S1kjxd3 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pganalyze/pg_query_go/v2 v2.2.0 h1:OW+reH+ZY7jdEuPyuLGlf1m7dLbE+fDudKXhLs0Ttpk= github.com/pganalyze/pg_query_go/v2 v2.2.0/go.mod h1:XAxmVqz1tEGqizcQ3YSdN90vCOHBWjJi8URL1er5+cA= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod 
h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1281,15 +1279,15 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= -github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -1302,8 +1300,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= 
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= @@ -1342,10 +1340,12 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1385,8 +1385,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus 
v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1431,8 +1431,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1440,18 +1441,12 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 
h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= -github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y= -github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= -github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY= -github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1516,8 +1511,8 @@ go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R7 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver 
v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= -go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= -go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mongodb.org/mongo-driver v1.11.6 h1:XM7G6PjiGAO5betLF13BIa5TlLUUE3uJ/2Ox3Lz1K+o= +go.mongodb.org/mongo-driver v1.11.6/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqblyjsfuh8oXY= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1558,6 +1553,7 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -1694,8 +1690,8 @@ golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net 
v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1714,8 +1710,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1729,8 +1725,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync 
v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1860,15 +1856,16 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= 
+golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1973,16 +1970,14 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= @@ -2105,8 +2100,8 @@ google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 h1:znp6mq/drrY+6khTAlJUDNFFcDGV2ENLYKpMq8SyCds= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2141,10 +2136,9 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.55.0-dev h1:b3WG8LoyS+X/C5ZbIWsJGjt8Hhqq0wUVX8+rPF/BHZo= -google.golang.org/grpc 
v1.55.0-dev/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.0-dev h1:3XdSkn+E4E0OxKEID50paHDwVA7cqZVolkHtMFaoQJA= +google.golang.org/grpc v1.56.0-dev/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2160,8 +2154,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= -gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2210,7 +2202,6 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.21.4/go.mod 
h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= @@ -2229,8 +2220,8 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= -k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= +k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo= +k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= k8s.io/apiextensions-apiserver v0.26.2 h1:/yTG2B9jGY2Q70iGskMf41qTLhL9XeNN2KhI0uDgwko= k8s.io/apiextensions-apiserver v0.26.2/go.mod h1:Y7UPgch8nph8mGCuVk0SK83LnS8Esf3n6fUBgew8SH8= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -2238,20 +2229,20 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= -k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= +k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg= +k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= 
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/cli-runtime v0.27.1 h1:MMzp5Q/Xmr5L1Lrowuc+Y/r95XINC6c6/fE3aN7JDRM= -k8s.io/cli-runtime v0.27.1/go.mod h1:tEbTB1XP/nTH3wujsi52bw91gWpErtWiS15R6CwYsAI= +k8s.io/cli-runtime v0.27.2 h1:9HI8gfReNujKXt16tGOAnb8b4NZ5E+e0mQQHKhFGwYw= +k8s.io/cli-runtime v0.27.2/go.mod h1:9UecpyPDTkhiYY4d9htzRqN+rKomJgyb4wi0OfrmCjw= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= -k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= +k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE= +k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= @@ -2276,10 +2267,10 @@ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= 
-k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= -k8s.io/kubectl v0.27.1 h1:9T5c5KdpburYiW8XKQSH0Uly1kMNE90aGSnbYUZNdcA= -k8s.io/kubectl v0.27.1/go.mod h1:QsAkSmrRsKTPlAFzF8kODGDl4p35BIwQnc9XFhkcsy8= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kubectl v0.27.2 h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg= +k8s.io/kubectl v0.27.2/go.mod h1:GCOODtxPcrjh+EC611MqREkU8RjYBh10ldQCQ6zpFKw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2305,8 +2296,8 @@ modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/libc v1.22.3 h1:D/g6O5ftAfavceqlLOFwaZuA5KYafKwmr30A6iSqoyY= -modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -2322,19 +2313,19 @@ modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/ql v1.0.0/go.mod 
h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= -modernc.org/sqlite v1.21.1 h1:GyDFqNnESLOhwwDRaHGdp2jKLDzpyT/rNLglX3ZkMSU= -modernc.org/sqlite v1.21.1/go.mod h1:XwQ0wZPIh1iKb5mkvCJ3szzbhk+tykC8ZWqTRTgYRwI= +modernc.org/sqlite v1.22.1 h1:P2+Dhp5FR1RlVRkQ3dDfCiv3Ok8XPxqpe70IjYVA9oE= +modernc.org/sqlite v1.22.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= -modernc.org/tcl v1.15.1 h1:mOQwiEK4p7HruMZcwKTZPw/aqtGM4aY00uzWhlKKYws= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= @@ -2361,5 +2352,3 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 
h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -vitess.io/vitess v0.15.2 h1:+fl4ayKMtW9lnHdS4tzfuG9c4uDD0opNzgWqU8ODgjU= -vitess.io/vitess v0.15.2/go.mod h1:TQKDaMFeI3NfxGdztmXYHaf3E6xVBqJuHBb0qNRBHlY= diff --git a/managed/Makefile b/managed/Makefile index 387744dca6..9412255408 100644 --- a/managed/Makefile +++ b/managed/Makefile @@ -46,12 +46,13 @@ release-dev: ## Build pmm-managed binaries for development PMM_TEST_FLAGS ?= -timeout=180s PMM_TEST_RUN_UPDATE ?= 0 +PMM_TEST_FILES ?= ./... test: ## Run tests - go test $(PMM_TEST_FLAGS) -p 1 -race ./... + go test $(PMM_TEST_FLAGS) -p 1 -race $(PMM_TEST_FILES) test-cover: ## Run tests and collect per-package coverage information - go test $(PMM_TEST_FLAGS) -p 1 -race -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./... + go test $(PMM_TEST_FLAGS) -p 1 -race -coverprofile=cover.out -covermode=atomic -coverpkg=$(PMM_TEST_FILES) $(PMM_TEST_FILES) test-update: ## Run pmm update test PMM_TEST_RUN_UPDATE=1 go test -timeout=600s -v -p 1 -race ./services/supervisord -run ^TestDevContainer/Update$$ diff --git a/managed/cmd/pmm-managed-starlark/main_test.go b/managed/cmd/pmm-managed-starlark/main_test.go index abb730bbd5..f53c75620f 100644 --- a/managed/cmd/pmm-managed-starlark/main_test.go +++ b/managed/cmd/pmm-managed-starlark/main_test.go @@ -139,7 +139,7 @@ func TestStarlarkSandbox(t *testing.T) { actualStdout, err := cmd.Output() if err != nil { - exiterr, ok := err.(*exec.ExitError) + exiterr, ok := err.(*exec.ExitError) //nolint:errorlint require.True(t, ok) assert.Equal(t, tc.exitError, exiterr.Error()) assert.Equal(t, tc.exitCode, exiterr.ExitCode()) diff --git a/managed/cmd/pmm-managed/main.go b/managed/cmd/pmm-managed/main.go index d820222479..ceb9d77241 100644 --- a/managed/cmd/pmm-managed/main.go +++ b/managed/cmd/pmm-managed/main.go @@ -67,6 +67,7 @@ import ( backuppb "github.com/percona/pmm/api/managementpb/backup" 
dbaasv1beta1 "github.com/percona/pmm/api/managementpb/dbaas" iav1beta1 "github.com/percona/pmm/api/managementpb/ia" + nodev1beta1 "github.com/percona/pmm/api/managementpb/node" rolev1beta1 "github.com/percona/pmm/api/managementpb/role" servicev1beta1 "github.com/percona/pmm/api/managementpb/service" "github.com/percona/pmm/api/onboardingpb" @@ -207,7 +208,7 @@ type gRPCServerDeps struct { backupService *backup.Service compatibilityService *backup.CompatibilityService backupRemovalService *backup.RemovalService - pitrTimerangeService *backup.PITRTimerangeService + pbmPITRService *backup.PBMPITRService minioClient *minio.Client versionCache *versioncache.Service supervisord *supervisord.Service @@ -274,6 +275,7 @@ func runGRPCServer(ctx context.Context, deps *gRPCServerDeps) { managementpb.RegisterNodeServer(gRPCServer, managementgrpc.NewManagementNodeServer(nodeSvc)) agentv1beta1.RegisterAgentServer(gRPCServer, agentSvc) + nodev1beta1.RegisterMgmtNodeServer(gRPCServer, management.NewMgmtNodeService(deps.db, deps.agentsRegistry, v1.NewAPI(*deps.vmClient))) servicev1beta1.RegisterMgmtServiceServer(gRPCServer, management.NewMgmtServiceService(deps.db, deps.agentsRegistry, deps.agentsStateUpdater, deps.vmdb, v1.NewAPI(*deps.vmClient))) managementpb.RegisterServiceServer(gRPCServer, serviceSvc) managementpb.RegisterMySQLServer(gRPCServer, managementgrpc.NewManagementMySQLServer(mysqlSvc)) @@ -299,7 +301,7 @@ func runGRPCServer(ctx context.Context, deps *gRPCServerDeps) { backuppb.RegisterBackupsServer(gRPCServer, managementbackup.NewBackupsService(deps.db, deps.backupService, deps.compatibilityService, deps.schedulerService)) backuppb.RegisterLocationsServer(gRPCServer, managementbackup.NewLocationsService(deps.db, deps.minioClient)) - backuppb.RegisterArtifactsServer(gRPCServer, managementbackup.NewArtifactsService(deps.db, deps.backupRemovalService, deps.pitrTimerangeService)) + backuppb.RegisterArtifactsServer(gRPCServer, managementbackup.NewArtifactsService(deps.db, 
deps.backupRemovalService, deps.pbmPITRService)) backuppb.RegisterRestoreHistoryServer(gRPCServer, managementbackup.NewRestoreHistoryService(deps.db)) k8sServer := managementdbaas.NewKubernetesServer(deps.db, deps.dbaasClient, deps.versionServiceClient, deps.grafanaClient) @@ -398,6 +400,7 @@ func runHTTP1Server(ctx context.Context, deps *http1ServerDeps) { managementpb.RegisterNodeHandlerFromEndpoint, agentv1beta1.RegisterAgentHandlerFromEndpoint, + nodev1beta1.RegisterMgmtNodeHandlerFromEndpoint, servicev1beta1.RegisterMgmtServiceHandlerFromEndpoint, managementpb.RegisterServiceHandlerFromEndpoint, managementpb.RegisterMySQLHandlerFromEndpoint, @@ -662,7 +665,7 @@ func newClickhouseDB(dsn string, maxIdleConns, maxOpenConns int) (*sql.DB, error return db, nil } -func main() { +func main() { //nolint:cyclop // empty version breaks much of pmm-managed logic if version.Version == "" { panic("pmm-managed version is not set during build.") @@ -833,8 +836,8 @@ func main() { qanClient := getQANClient(ctx, sqlDB, *postgresDBNameF, *qanAPIAddrF) agentsRegistry := agents.NewRegistry(db, vmParams) - backupRemovalService := backup.NewRemovalService(db, minioClient) - pitrTimerangeService := backup.NewPITRTimerangeService(minioClient) + pbmPITRService := backup.NewPBMPITRService() + backupRemovalService := backup.NewRemovalService(db, pbmPITRService) backupRetentionService := backup.NewRetentionService(db, backupRemovalService) prom.MustRegister(agentsRegistry) @@ -927,7 +930,7 @@ func main() { versioner := agents.NewVersionerService(agentsRegistry) dbaasClient := dbaas.NewClient(*dbaasControllerAPIAddrF) compatibilityService := backup.NewCompatibilityService(db, versioner) - backupService := backup.NewService(db, jobsService, agentService, compatibilityService, pitrTimerangeService) + backupService := backup.NewService(db, jobsService, agentService, compatibilityService, pbmPITRService) schedulerService := scheduler.New(db, backupService) versionCache := versioncache.New(db, 
versioner) emailer := alertmanager.NewEmailer(logrus.WithField("component", "alertmanager-emailer").Logger) @@ -971,11 +974,11 @@ func main() { select { case s := <-terminationSignals: signal.Stop(terminationSignals) - l.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) + l.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert cancel() return case s := <-updateSignals: - l.Infof("Got %s, reloading configuration...", unix.SignalName(s.(unix.Signal))) + l.Infof("Got %s, reloading configuration...", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert err := server.UpdateConfigurations(ctx) if err != nil { l.Warnf("Couldn't reload configuration: %s", err) @@ -1115,7 +1118,7 @@ func main() { backupService: backupService, compatibilityService: compatibilityService, backupRemovalService: backupRemovalService, - pitrTimerangeService: pitrTimerangeService, + pbmPITRService: pbmPITRService, minioClient: minioClient, versionCache: versionCache, supervisord: supervisord, diff --git a/managed/models/action_helpers.go b/managed/models/action_helpers.go index c3fece6e27..a61f08bdca 100644 --- a/managed/models/action_helpers.go +++ b/managed/models/action_helpers.go @@ -32,14 +32,15 @@ func FindActionResultByID(q *reform.Querier, id string) (*ActionResult, error) { } res := &ActionResult{ID: id} - switch err := q.Reload(res); err { - case nil: - return res, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "ActionResult with ID %q not found.", id) - default: + err := q.Reload(res) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "ActionResult with ID %q not found.", id) + } return nil, errors.WithStack(err) } + + return res, nil } // CreateActionResult stores an action result in action results storage. 
diff --git a/managed/models/agent_helpers.go b/managed/models/agent_helpers.go index 5badb05aa5..59e1c93ac4 100644 --- a/managed/models/agent_helpers.go +++ b/managed/models/agent_helpers.go @@ -148,14 +148,15 @@ func checkUniqueAgentID(q *reform.Querier, id string) error { } agent := &Agent{AgentID: id} - switch err := q.Reload(agent); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Agent with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(agent) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Agent with ID %q already exists.", id) } // AgentFilters represents filters for agents list. @@ -215,7 +216,7 @@ func FindAgents(q *reform.Querier, filters AgentFilters) ([]*Agent, error) { agents := make([]*Agent, len(structs)) for i, s := range structs { - agents[i] = s.(*Agent) + agents[i] = s.(*Agent) //nolint:forcetypeassert } return agents, nil @@ -228,14 +229,15 @@ func FindAgentByID(q *reform.Querier, id string) (*Agent, error) { } agent := &Agent{AgentID: id} - switch err := q.Reload(agent); err { - case nil: - return agent, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Agent with ID %q not found.", id) - default: + err := q.Reload(agent) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Agent with ID %q not found.", id) + } return nil, errors.WithStack(err) } + + return agent, nil } // FindAgentsByIDs finds Agents by IDs. 
@@ -257,7 +259,7 @@ func FindAgentsByIDs(q *reform.Querier, ids []string) ([]*Agent, error) { res := make([]*Agent, len(structs)) for i, s := range structs { - res[i] = s.(*Agent) + res[i] = s.(*Agent) //nolint:forcetypeassert } return res, nil } @@ -308,7 +310,7 @@ func FindDBConfigForService(q *reform.Querier, serviceID string) (*DBConfig, err res := make([]*Agent, len(structs)) for i, s := range structs { - res[i] = s.(*Agent) + res[i] = s.(*Agent) //nolint:forcetypeassert } if len(res) == 0 { @@ -335,7 +337,7 @@ func FindPMMAgentsRunningOnNode(q *reform.Querier, nodeID string) ([]*Agent, err res := make([]*Agent, 0, len(structs)) for _, str := range structs { - row := str.(*Agent) + row := str.(*Agent) //nolint:forcetypeassert res = append(res, row) } @@ -356,7 +358,7 @@ func FindPMMAgentsForService(q *reform.Querier, serviceID string) ([]*Agent, err } pmmAgentIDs := make([]interface{}, len(allAgents)) for _, str := range allAgents { - row := str.(*Agent) + row := str.(*Agent) //nolint:forcetypeassert if row.PMMAgentID != nil { for _, a := range pmmAgentIDs { if a == *row.PMMAgentID { @@ -380,7 +382,7 @@ func FindPMMAgentsForService(q *reform.Querier, serviceID string) ([]*Agent, err } res := make([]*Agent, 0, len(pmmAgentRecords)) for _, str := range pmmAgentRecords { - row := str.(*Agent) + row := str.(*Agent) //nolint:forcetypeassert res = append(res, row) } @@ -396,7 +398,7 @@ func FindPMMAgentsForServicesOnNode(q *reform.Querier, nodeID string) ([]*Agent, allAgents := make([]*Agent, 0, len(structs)) for _, str := range structs { - serviceID := str.(*Service).ServiceID + serviceID := str.(*Service).ServiceID //nolint:forcetypeassert agents, err := FindPMMAgentsForService(q, serviceID) if err != nil { return nil, errors.WithStack(err) @@ -462,7 +464,7 @@ func FindAgentsForScrapeConfig(q *reform.Querier, pmmAgentID *string, pushMetric res := make([]*Agent, len(allAgents)) for i, s := range allAgents { - res[i] = s.(*Agent) + res[i] = s.(*Agent) 
//nolint:forcetypeassert } return res, nil } @@ -477,7 +479,7 @@ func FindPMMAgentsIDsWithPushMetrics(q *reform.Querier) ([]string, error) { uniqAgents := make(map[string]struct{}) res := make([]string, 0, len(structs)) for _, str := range structs { - row := pointer.GetString(str.(*Agent).PMMAgentID) + row := pointer.GetString(str.(*Agent).PMMAgentID) //nolint:forcetypeassert if _, ok := uniqAgents[row]; ok { continue } @@ -953,7 +955,7 @@ func RemoveAgent(q *reform.Querier, id string, mode RemoveMode) (*Agent, error) return nil, status.Errorf(codes.FailedPrecondition, "pmm-agent with ID %q has agents.", id) case RemoveCascade: for _, str := range structs { - agentID := str.(*Agent).AgentID + agentID := str.(*Agent).AgentID //nolint:forcetypeassert if _, err = RemoveAgent(q, agentID, RemoveRestrict); err != nil { return nil, err } diff --git a/managed/models/artifact_helpers.go b/managed/models/artifact_helpers.go index b2162e495d..382d153bf0 100644 --- a/managed/models/artifact_helpers.go +++ b/managed/models/artifact_helpers.go @@ -38,7 +38,7 @@ type ArtifactFilters struct { Status BackupStatus } -// FindArtifacts returns artifacts list. +// FindArtifacts returns artifact list sorted by creation time in DESCENDING order. 
func FindArtifacts(q *reform.Querier, filters ArtifactFilters) ([]*Artifact, error) { var conditions []string var args []interface{} @@ -80,7 +80,7 @@ func FindArtifacts(q *reform.Querier, filters ArtifactFilters) ([]*Artifact, err artifacts := make([]*Artifact, 0, len(rows)) for _, r := range rows { - artifacts = append(artifacts, r.(*Artifact)) + artifacts = append(artifacts, r.(*Artifact)) //nolint:forcetypeassert } return artifacts, nil @@ -106,7 +106,7 @@ func FindArtifactsByIDs(q *reform.Querier, ids []string) (map[string]*Artifact, artifacts := make(map[string]*Artifact, len(all)) for _, l := range all { - artifact := l.(*Artifact) + artifact := l.(*Artifact) //nolint:forcetypeassert artifacts[artifact.ID] = artifact } return artifacts, nil @@ -119,14 +119,15 @@ func FindArtifactByID(q *reform.Querier, id string) (*Artifact, error) { } artifact := &Artifact{ID: id} - switch err := q.Reload(artifact); err { - case nil: - return artifact, nil - case reform.ErrNoRows: - return nil, errors.Wrapf(ErrNotFound, "artifact by id '%s'", id) - default: + err := q.Reload(artifact) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, errors.Wrapf(ErrNotFound, "artifact by id '%s'", id) + } return nil, errors.WithStack(err) } + + return artifact, nil } // FindArtifactByName returns artifact by given name if found, ErrNotFound if not. 
@@ -136,14 +137,14 @@ func FindArtifactByName(q *reform.Querier, name string) (*Artifact, error) { } artifact := &Artifact{} err := q.FindOneTo(artifact, "name", name) - switch err { - case nil: - return artifact, nil - case reform.ErrNoRows: - return nil, errors.Wrapf(ErrNotFound, "backup artifact with name %q not found.", name) - default: + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, errors.Wrapf(ErrNotFound, "backup artifact with name %q not found.", name) + } return nil, errors.WithStack(err) } + + return artifact, nil } func checkUniqueArtifactName(q *reform.Querier, name string) error { @@ -152,27 +153,30 @@ func checkUniqueArtifactName(q *reform.Querier, name string) error { } var artifact Artifact - switch err := q.FindOneTo(&artifact, "name", name); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Artifact with name %q already exists.", name) - case reform.ErrNoRows: - return nil - default: + err := q.FindOneTo(&artifact, "name", name) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Artifact with name %q already exists.", name) } // CreateArtifactParams are params for creating a new artifact. type CreateArtifactParams struct { - Name string - Vendor string - DBVersion string - LocationID string - ServiceID string - DataModel DataModel - Mode BackupMode - Status BackupStatus - ScheduleID string + Name string + Vendor string + DBVersion string + LocationID string + ServiceID string + DataModel DataModel + Mode BackupMode + Status BackupStatus + ScheduleID string + IsShardedCluster bool + Folder string } // Validate validates params used for creating an artifact entry. 
@@ -222,17 +226,19 @@ func CreateArtifact(q *reform.Querier, params CreateArtifactParams) (*Artifact, } row := &Artifact{ - ID: id, - Name: params.Name, - Vendor: params.Vendor, - DBVersion: params.DBVersion, - LocationID: params.LocationID, - ServiceID: params.ServiceID, - DataModel: params.DataModel, - Mode: params.Mode, - Status: params.Status, - Type: OnDemandArtifactType, - ScheduleID: params.ScheduleID, + ID: id, + Name: params.Name, + Vendor: params.Vendor, + DBVersion: params.DBVersion, + LocationID: params.LocationID, + ServiceID: params.ServiceID, + DataModel: params.DataModel, + Mode: params.Mode, + Status: params.Status, + Type: OnDemandArtifactType, + ScheduleID: params.ScheduleID, + IsShardedCluster: params.IsShardedCluster, + Folder: params.Folder, } if params.ScheduleID != "" { @@ -248,9 +254,12 @@ func CreateArtifact(q *reform.Querier, params CreateArtifactParams) (*Artifact, // UpdateArtifactParams are params for changing existing artifact. type UpdateArtifactParams struct { - ServiceID *string - Status *BackupStatus - ScheduleID *string + ServiceID *string + Status *BackupStatus + ScheduleID *string + IsShardedCluster bool + Metadata *Metadata + Folder *string } // UpdateArtifact updates existing artifact. @@ -269,6 +278,19 @@ func UpdateArtifact(q *reform.Querier, artifactID string, params UpdateArtifactP row.ScheduleID = *params.ScheduleID } + if params.IsShardedCluster && !row.IsShardedCluster { + row.IsShardedCluster = true + } + + if params.Metadata != nil { + // We're appending to existing list to cover PITR mode cases. + row.MetadataList = append(row.MetadataList, *params.Metadata) + } + + if params.Folder != nil { + row.Folder = *params.Folder + } + if err := q.Update(row); err != nil { return nil, errors.Wrap(err, "failed to update backup artifact") } @@ -287,3 +309,27 @@ func DeleteArtifact(q *reform.Querier, id string) error { } return nil } + +// MetadataRemoveFirstN removes first N records from artifact metadata list. 
+func (s *Artifact) MetadataRemoveFirstN(q *reform.Querier, n uint32) error { + if n > uint32(len(s.MetadataList)) { + n = uint32(len(s.MetadataList)) + } + s.MetadataList = s.MetadataList[n:] + if err := q.Update(s); err != nil { + return errors.Wrap(err, "failed to remove artifact metadata records") + } + return nil +} + +// IsArtifactFinalStatus checks if artifact status is one of the final ones. +func IsArtifactFinalStatus(backupStatus BackupStatus) bool { + switch backupStatus { + case SuccessBackupStatus, + ErrorBackupStatus, + FailedToDeleteBackupStatus: + return true + default: + return false + } +} diff --git a/managed/models/artifact_helpers_test.go b/managed/models/artifact_helpers_test.go index 26ef4bc9fa..e459e7c6a7 100644 --- a/managed/models/artifact_helpers_test.go +++ b/managed/models/artifact_helpers_test.go @@ -88,7 +88,7 @@ func TestArtifacts(t *testing.T) { } } - t.Run("create", func(t *testing.T) { + t.Run("create and update", func(t *testing.T) { tx, err := db.Begin() require.NoError(t, err) t.Cleanup(func() { @@ -98,7 +98,7 @@ func TestArtifacts(t *testing.T) { q := tx.Querier prepareLocationsAndService(q) - params := models.CreateArtifactParams{ + createParams := models.CreateArtifactParams{ Name: "backup_name", Vendor: "MySQL", LocationID: locationID1, @@ -106,17 +106,36 @@ func TestArtifacts(t *testing.T) { DataModel: models.PhysicalDataModel, Status: models.PendingBackupStatus, Mode: models.Snapshot, + Folder: "artifact_folder", } - a, err := models.CreateArtifact(q, params) + a, err := models.CreateArtifact(q, createParams) require.NoError(t, err) - assert.Equal(t, params.Name, a.Name) - assert.Equal(t, params.Vendor, a.Vendor) - assert.Equal(t, params.LocationID, a.LocationID) - assert.Equal(t, params.ServiceID, a.ServiceID) - assert.Equal(t, params.DataModel, a.DataModel) - assert.Equal(t, params.Status, a.Status) + require.NotNil(t, a) + assert.Equal(t, createParams.Name, a.Name) + assert.Equal(t, createParams.Vendor, a.Vendor) + 
assert.Equal(t, createParams.LocationID, a.LocationID) + assert.Equal(t, createParams.ServiceID, a.ServiceID) + assert.Equal(t, createParams.DataModel, a.DataModel) + assert.Equal(t, createParams.Status, a.Status) + assert.Equal(t, createParams.Folder, a.Folder) assert.Less(t, time.Now().UTC().Unix()-a.CreatedAt.Unix(), int64(5)) + + updateParams := models.UpdateArtifactParams{ + Status: models.SuccessBackupStatus.Pointer(), + ScheduleID: pointer.ToString("schedule_id"), + ServiceID: &serviceID2, + IsShardedCluster: true, + } + + a, err = models.UpdateArtifact(q, a.ID, updateParams) + require.NoError(t, err) + require.NotNil(t, a) + assert.Equal(t, *updateParams.Status, a.Status) + assert.Equal(t, *updateParams.ScheduleID, a.ScheduleID) + assert.Equal(t, *updateParams.ServiceID, a.ServiceID) + assert.Equal(t, updateParams.IsShardedCluster, a.IsShardedCluster) + assert.Less(t, time.Now().UTC().Unix()-a.UpdatedAt.Unix(), int64(5)) }) t.Run("list", func(t *testing.T) { @@ -201,6 +220,55 @@ func TestArtifacts(t *testing.T) { require.NoError(t, err) assert.Empty(t, artifacts) }) + + t.Run("MetadataRemoveFirstN", func(t *testing.T) { + tx, err := db.Begin() + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, tx.Rollback()) + }) + + q := tx.Querier + prepareLocationsAndService(q) + + params := models.CreateArtifactParams{ + Name: "backup_name", + Vendor: "MongoDB", + LocationID: locationID1, + ServiceID: serviceID1, + DataModel: models.LogicalDataModel, + Status: models.SuccessBackupStatus, + Mode: models.PITR, + } + + a, err := models.CreateArtifact(q, params) + require.NotNil(t, a) + require.NoError(t, err) + + a, err = models.UpdateArtifact(q, a.ID, models.UpdateArtifactParams{Metadata: &models.Metadata{FileList: []models.File{{Name: "file1"}}}}) + require.NoError(t, err) + + a, err = models.UpdateArtifact(q, a.ID, models.UpdateArtifactParams{Metadata: &models.Metadata{FileList: []models.File{{Name: "file2"}}}}) + require.NoError(t, err) + + a, err = 
models.UpdateArtifact(q, a.ID, models.UpdateArtifactParams{Metadata: &models.Metadata{FileList: []models.File{{Name: "file3"}}}}) + require.NoError(t, err) + + a, err = models.UpdateArtifact(q, a.ID, models.UpdateArtifactParams{Metadata: &models.Metadata{FileList: []models.File{{Name: "file4"}}}}) + require.NoError(t, err) + + err = a.MetadataRemoveFirstN(q, 0) + require.NoError(t, err) + assert.Equal(t, 4, len(a.MetadataList)) + + err = a.MetadataRemoveFirstN(q, 3) + require.NoError(t, err) + assert.Equal(t, 1, len(a.MetadataList)) + + err = a.MetadataRemoveFirstN(q, 10) + require.NoError(t, err) + assert.Equal(t, 0, len(a.MetadataList)) + }) } func TestArtifactValidation(t *testing.T) { diff --git a/managed/models/artifact_model.go b/managed/models/artifact_model.go index 4373e43dc6..6c879d4d24 100644 --- a/managed/models/artifact_model.go +++ b/managed/models/artifact_model.go @@ -16,6 +16,7 @@ package models import ( + "database/sql/driver" "time" "gopkg.in/reform.v1" @@ -58,6 +59,7 @@ const ( ErrorBackupStatus BackupStatus = "error" DeletingBackupStatus BackupStatus = "deleting" FailedToDeleteBackupStatus BackupStatus = "failed_to_delete" + CleanupInProgressStatus BackupStatus = "cleanup_in_progress" ) // Validate validates backup status. @@ -77,9 +79,9 @@ func (bs BackupStatus) Validate() error { return nil } -// BackupStatusPointer returns a pointer of backup status. -func BackupStatusPointer(status BackupStatus) *BackupStatus { - return &status +// Pointer returns a pointer to status value. +func (bs BackupStatus) Pointer() *BackupStatus { + return &bs } // ArtifactType represents type how artifact was created. @@ -116,23 +118,58 @@ func (m BackupMode) Validate() error { return nil } +// File represents file or directory. +type File struct { + Name string `json:"name"` + IsDirectory bool `json:"is_directory"` +} + +// PbmMetadata contains extra data for pbm cli tool. +type PbmMetadata struct { + // Name of backup in pbm representation. 
+ Name string `json:"name"` +} + +// BackupToolData contains extra data for backup tools. +type BackupToolData struct { + PbmMetadata *PbmMetadata +} + +// Metadata contains extra artifact data like files it consists of, tool specific data, etc. +type Metadata struct { + FileList []File `json:"file_list"` + RestoreTo *time.Time `json:"restore_to"` + BackupToolData *BackupToolData `json:"backup_tool_data"` +} + +type MetadataList []Metadata + +// Value implements database/sql/driver.Valuer interface. Should be defined on the value. +func (p MetadataList) Value() (driver.Value, error) { return jsonValue(p) } + +// Scan implements database/sql.Scanner interface. Should be defined on the pointer. +func (p *MetadataList) Scan(src interface{}) error { return jsonScan(p, src) } + // Artifact represents result of a backup. // //reform:artifacts type Artifact struct { - ID string `reform:"id,pk"` - Name string `reform:"name"` - Vendor string `reform:"vendor"` - DBVersion string `reform:"db_version"` - LocationID string `reform:"location_id"` - ServiceID string `reform:"service_id"` - DataModel DataModel `reform:"data_model"` - Mode BackupMode `reform:"mode"` - Status BackupStatus `reform:"status"` - Type ArtifactType `reform:"type"` - ScheduleID string `reform:"schedule_id"` - CreatedAt time.Time `reform:"created_at"` - UpdatedAt time.Time `reform:"updated_at"` + ID string `reform:"id,pk"` + Name string `reform:"name"` + Vendor string `reform:"vendor"` + DBVersion string `reform:"db_version"` + LocationID string `reform:"location_id"` + ServiceID string `reform:"service_id"` + DataModel DataModel `reform:"data_model"` + Mode BackupMode `reform:"mode"` + Status BackupStatus `reform:"status"` + Type ArtifactType `reform:"type"` + ScheduleID string `reform:"schedule_id"` + CreatedAt time.Time `reform:"created_at"` + UpdatedAt time.Time `reform:"updated_at"` + IsShardedCluster bool `reform:"is_sharded_cluster"` + Folder string `reform:"folder"` + MetadataList MetadataList 
`reform:"metadata_list"` } // BeforeInsert implements reform.BeforeInserter interface. diff --git a/managed/models/artifact_model_reform.go b/managed/models/artifact_model_reform.go index 9e782a21c4..6bd816c51b 100644 --- a/managed/models/artifact_model_reform.go +++ b/managed/models/artifact_model_reform.go @@ -41,6 +41,9 @@ func (v *artifactTableType) Columns() []string { "schedule_id", "created_at", "updated_at", + "is_sharded_cluster", + "folder", + "metadata_list", } } @@ -78,6 +81,9 @@ var ArtifactTable = &artifactTableType{ {Name: "ScheduleID", Type: "string", Column: "schedule_id"}, {Name: "CreatedAt", Type: "time.Time", Column: "created_at"}, {Name: "UpdatedAt", Type: "time.Time", Column: "updated_at"}, + {Name: "IsShardedCluster", Type: "bool", Column: "is_sharded_cluster"}, + {Name: "Folder", Type: "string", Column: "folder"}, + {Name: "MetadataList", Type: "MetadataList", Column: "metadata_list"}, }, PKFieldIndex: 0, }, @@ -86,7 +92,7 @@ var ArtifactTable = &artifactTableType{ // String returns a string representation of this struct or record. 
func (s Artifact) String() string { - res := make([]string, 13) + res := make([]string, 16) res[0] = "ID: " + reform.Inspect(s.ID, true) res[1] = "Name: " + reform.Inspect(s.Name, true) res[2] = "Vendor: " + reform.Inspect(s.Vendor, true) @@ -100,6 +106,9 @@ func (s Artifact) String() string { res[10] = "ScheduleID: " + reform.Inspect(s.ScheduleID, true) res[11] = "CreatedAt: " + reform.Inspect(s.CreatedAt, true) res[12] = "UpdatedAt: " + reform.Inspect(s.UpdatedAt, true) + res[13] = "IsShardedCluster: " + reform.Inspect(s.IsShardedCluster, true) + res[14] = "Folder: " + reform.Inspect(s.Folder, true) + res[15] = "MetadataList: " + reform.Inspect(s.MetadataList, true) return strings.Join(res, ", ") } @@ -120,6 +129,9 @@ func (s *Artifact) Values() []interface{} { s.ScheduleID, s.CreatedAt, s.UpdatedAt, + s.IsShardedCluster, + s.Folder, + s.MetadataList, } } @@ -140,6 +152,9 @@ func (s *Artifact) Pointers() []interface{} { &s.ScheduleID, &s.CreatedAt, &s.UpdatedAt, + &s.IsShardedCluster, + &s.Folder, + &s.MetadataList, } } diff --git a/managed/models/channel_helpers.go b/managed/models/channel_helpers.go index 86dc7cf21f..c39f9a24e4 100644 --- a/managed/models/channel_helpers.go +++ b/managed/models/channel_helpers.go @@ -34,14 +34,15 @@ func checkUniqueChannelID(q *reform.Querier, id string) error { } agent := &Channel{ID: id} - switch err := q.Reload(agent); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Channel with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(agent) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Channel with ID %q already exists.", id) } func checkEmailConfig(c *EmailConfig) error { @@ -117,7 +118,7 @@ func FindChannels(q *reform.Querier) ([]*Channel, error) { channels := make([]*Channel, len(rows)) for i, s := range rows { - channels[i] = s.(*Channel) + channels[i] = 
s.(*Channel) //nolint:forcetypeassert } return channels, nil @@ -132,7 +133,7 @@ func FindChannelsOnPage(q *reform.Querier, pageIndex, pageSize int) ([]*Channel, channels := make([]*Channel, len(rows)) for i, s := range rows { - channels[i] = s.(*Channel) + channels[i] = s.(*Channel) //nolint:forcetypeassert } return channels, nil @@ -155,14 +156,15 @@ func FindChannelByID(q *reform.Querier, id string) (*Channel, error) { } channel := &Channel{ID: id} - switch err := q.Reload(channel); err { - case nil: - return channel, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Channel with ID %q not found.", id) - default: + err := q.Reload(channel) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Channel with ID %q not found.", id) + } return nil, errors.WithStack(err) } + + return channel, nil } // FindChannelsByIDs finds channels by IDs. @@ -185,7 +187,7 @@ func FindChannelsByIDs(q *reform.Querier, ids []string) ([]*Channel, error) { res := make([]*Channel, len(structs)) for i, s := range structs { - res[i] = s.(*Channel) + res[i] = s.(*Channel) //nolint:forcetypeassert } return res, nil } @@ -379,12 +381,11 @@ func RemoveChannel(q *reform.Querier, id string) error { func channelInUse(q *reform.Querier, id string) (bool, error) { _, err := q.SelectOneFrom(RuleTable, "WHERE channel_ids ? $1", id) - switch err { - case nil: - return true, nil - case reform.ErrNoRows: - return false, nil - default: + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return false, nil + } return false, errors.WithStack(err) } + return true, nil } diff --git a/managed/models/check_settings_helper.go b/managed/models/check_settings_helper.go index 2ac3f1e583..ad9d09ac97 100644 --- a/managed/models/check_settings_helper.go +++ b/managed/models/check_settings_helper.go @@ -25,19 +25,19 @@ import ( // FindCheckSettings returns all CheckSettings stored in the table. 
func FindCheckSettings(q *reform.Querier) (map[string]Interval, error) { rows, err := q.SelectAllFrom(CheckSettingsTable, "") - switch err { - case nil: - cs := make(map[string]Interval) - for _, r := range rows { - state := r.(*CheckSettings) - cs[state.Name] = state.Interval + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, err } - return cs, nil - case reform.ErrNoRows: - return nil, err - default: return nil, errors.WithStack(err) } + + cs := make(map[string]Interval) + for _, r := range rows { + state := r.(*CheckSettings) //nolint:forcetypeassert + cs[state.Name] = state.Interval + } + return cs, nil } // FindCheckSettingsByName finds CheckSettings by check name. @@ -47,14 +47,15 @@ func FindCheckSettingsByName(q *reform.Querier, name string) (*CheckSettings, er } cs := &CheckSettings{Name: name} - switch err := q.Reload(cs); err { - case nil: - return cs, nil - case reform.ErrNoRows: - return nil, err - default: + err := q.Reload(cs) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, err + } return nil, errors.WithStack(err) } + + return cs, nil } // CreateCheckSettings persists CheckSettings. diff --git a/managed/models/database.go b/managed/models/database.go index 729d50f265..62649c3408 100644 --- a/managed/models/database.go +++ b/managed/models/database.go @@ -891,6 +891,19 @@ var databaseSchema = [][]string{ `ALTER TABLE kubernetes_clusters ADD COLUMN pgbouncer JSONB`, `ALTER TABLE kubernetes_clusters ADD COLUMN pgbackrest JSONB`, }, + 81: { + `ALTER TABLE artifacts + ADD COLUMN is_sharded_cluster BOOLEAN NOT NULL DEFAULT FALSE`, + }, + 82: { + `ALTER TABLE artifacts + ADD COLUMN folder VARCHAR NOT NULL DEFAULT '', + ADD COLUMN metadata_list JSONB; + + UPDATE scheduled_tasks + SET data = jsonb_set(data, '{mongodb_backup, folder}', data->'mongodb_backup'->'name') + WHERE type = 'mongodb_backup';`, + }, } // ^^^ Avoid default values in schema definition. 
^^^ @@ -969,7 +982,7 @@ func SetupDB(ctx context.Context, sqlDB *sql.DB, params SetupDBParams) (*reform. db := reform.NewDB(sqlDB, postgresql.Dialect, logger) errCV := checkVersion(ctx, db) - if pErr, ok := errCV.(*pq.Error); ok && pErr.Code == "28000" { + if pErr, ok := errCV.(*pq.Error); ok && pErr.Code == "28000" { //nolint:errorlint // invalid_authorization_specification (see https://www.postgresql.org/docs/current/errcodes-appendix.html) if err := initWithRoot(params); err != nil { return nil, errors.Wrapf(err, "couldn't connect to database with provided credentials. Tried to create user and database. Error: %s", errCV) @@ -1049,7 +1062,8 @@ func initWithRoot(params SetupDBParams) error { func migrateDB(db *reform.DB, params SetupDBParams) error { var currentVersion int errDB := db.QueryRow("SELECT id FROM schema_migrations ORDER BY id DESC LIMIT 1").Scan(¤tVersion) - if pErr, ok := errDB.(*pq.Error); ok && pErr.Code == "42P01" { // undefined_table (see https://www.postgresql.org/docs/current/errcodes-appendix.html) + // undefined_table (see https://www.postgresql.org/docs/current/errcodes-appendix.html) + if pErr, ok := errDB.(*pq.Error); ok && pErr.Code == "42P01" { //nolint:errorlint errDB = nil } if errDB != nil { diff --git a/managed/models/database_test.go b/managed/models/database_test.go index d991b3e1ad..cb6a900ee7 100644 --- a/managed/models/database_test.go +++ b/managed/models/database_test.go @@ -40,7 +40,7 @@ func assertUniqueViolation(t *testing.T, err error, constraint string) { t.Helper() require.IsType(t, &pq.Error{}, err) - pgErr := err.(*pq.Error) + pgErr := err.(*pq.Error) //nolint:errorlint assert.EqualValues(t, pq.ErrorCode("23505"), pgErr.Code) assert.Equal(t, fmt.Sprintf(`duplicate key value violates unique constraint %q`, constraint), pgErr.Message) } @@ -49,7 +49,7 @@ func assertCheckViolation(t *testing.T, err error, table, constraint string) { / t.Helper() require.IsType(t, &pq.Error{}, err) - pgErr := err.(*pq.Error) + pgErr := 
err.(*pq.Error) //nolint:errorlint assert.EqualValues(t, pq.ErrorCode("23514"), pgErr.Code) assert.Equal(t, fmt.Sprintf(`new row for relation %q violates check constraint %q`, table, constraint), pgErr.Message) } diff --git a/managed/models/job_helpers.go b/managed/models/job_helpers.go index 6a54f94310..1762c7b34f 100644 --- a/managed/models/job_helpers.go +++ b/managed/models/job_helpers.go @@ -39,14 +39,15 @@ func FindJobByID(q *reform.Querier, id string) (*Job, error) { res := &Job{ID: id} - switch err := q.Reload(res); err { - case nil: - return res, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Job with ID %q not found.", id) - default: + err := q.Reload(res) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Job with ID %q not found.", id) + } return nil, errors.WithStack(err) } + + return res, nil } // JobsFilter represents filter for jobs. @@ -102,7 +103,7 @@ func FindJobs(q *reform.Querier, filters JobsFilter) ([]*Job, error) { } jobs := make([]*Job, len(structs)) for i, s := range structs { - jobs[i] = s.(*Job) + jobs[i] = s.(*Job) //nolint:forcetypeassert } return jobs, nil } @@ -205,7 +206,7 @@ func FindJobLogs(q *reform.Querier, filters JobLogsFilter) ([]*JobLog, error) { logs := make([]*JobLog, 0, len(rows)) for _, r := range rows { - logs = append(logs, r.(*JobLog)) + logs = append(logs, r.(*JobLog)) //nolint:forcetypeassert } return logs, nil } diff --git a/managed/models/kubernetes_helpers.go b/managed/models/kubernetes_helpers.go index c5cd28341d..7c2a2d9447 100644 --- a/managed/models/kubernetes_helpers.go +++ b/managed/models/kubernetes_helpers.go @@ -29,14 +29,15 @@ func checkUniqueKubernetesClusterID(q *reform.Querier, id string) error { } cluster := &KubernetesCluster{ID: id} - switch err := q.Reload(cluster); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Kubernetes Cluster with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - 
default: + err := q.Reload(cluster) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Kubernetes Cluster with ID %q already exists.", id) } func checkUniqueKubernetesClusterName(q *reform.Querier, name string) error { @@ -44,14 +45,15 @@ func checkUniqueKubernetesClusterName(q *reform.Querier, name string) error { return status.Error(codes.InvalidArgument, "empty Kubernetes Cluster Name.") } - switch _, err := q.FindOneFrom(KubernetesClusterTable, "kubernetes_cluster_name", name); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Kubernetes Cluster with Name %q already exists.", name) - case reform.ErrNoRows: - return nil - default: + _, err := q.FindOneFrom(KubernetesClusterTable, "kubernetes_cluster_name", name) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Kubernetes Cluster with Name %q already exists.", name) } // FindAllKubernetesClusters returns all Kubernetes clusters. 
@@ -63,7 +65,7 @@ func FindAllKubernetesClusters(q *reform.Querier) ([]*KubernetesCluster, error) clusters := make([]*KubernetesCluster, len(structs)) for i, s := range structs { - clusters[i] = s.(*KubernetesCluster) + clusters[i] = s.(*KubernetesCluster) //nolint:forcetypeassert } return clusters, nil @@ -75,14 +77,15 @@ func FindKubernetesClusterByName(q *reform.Querier, name string) (*KubernetesClu return nil, status.Error(codes.InvalidArgument, "Empty Kubernetes Cluster Name.") } - switch cluster, err := q.FindOneFrom(KubernetesClusterTable, "kubernetes_cluster_name", name); err { - case nil: - return cluster.(*KubernetesCluster), nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Kubernetes Cluster with name %q not found.", name) - default: + cluster, err := q.FindOneFrom(KubernetesClusterTable, "kubernetes_cluster_name", name) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Kubernetes Cluster with name %q not found.", name) + } return nil, errors.WithStack(err) } + + return cluster.(*KubernetesCluster), nil //nolint:forcetypeassert } // CreateKubernetesClusterParams contains all params required to create Kubernetes cluster. diff --git a/managed/models/location_helpers.go b/managed/models/location_helpers.go index 22360c7490..de7da55a21 100644 --- a/managed/models/location_helpers.go +++ b/managed/models/location_helpers.go @@ -18,6 +18,8 @@ package models import ( "fmt" "net/url" + "path/filepath" + "regexp" "strings" "github.com/google/uuid" @@ -27,20 +29,23 @@ import ( "gopkg.in/reform.v1" ) +var pathRe = regexp.MustCompile(`^[\.:\/\w-]*$`) // Dots, colons, slashes, letters, digits, underscores, dashes. 
+ func checkUniqueBackupLocationID(q *reform.Querier, id string) error { if id == "" { panic("empty Location ID") } location := &BackupLocation{ID: id} - switch err := q.Reload(location); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Location with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(location) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Location with ID %q already exists.", id) } func checkUniqueBackupLocationName(q *reform.Querier, name string) error { @@ -49,14 +54,15 @@ func checkUniqueBackupLocationName(q *reform.Querier, name string) error { } var location BackupLocation - switch err := q.FindOneTo(&location, "name", name); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Location with name %q already exists.", name) - case reform.ErrNoRows: - return nil - default: + err := q.FindOneTo(&location, "name", name) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Location with name %q already exists.", name) } func checkFilesystemLocationConfig(c *FilesystemLocationConfig) error { @@ -66,6 +72,20 @@ func checkFilesystemLocationConfig(c *FilesystemLocationConfig) error { if c.Path == "" { return status.Error(codes.InvalidArgument, "PMM client config path field is empty.") } + + canonical := filepath.Clean(c.Path) + if canonical != c.Path { + return status.Errorf(codes.InvalidArgument, "Specified folder in non-canonical format, canonical would be: %q.", canonical) + } + + if !strings.HasPrefix(c.Path, "/") { + return status.Error(codes.InvalidArgument, "Folder should be an absolute path (should contain leading slash).") + } + + if !pathRe.Match([]byte(c.Path)) { + return status.Error(codes.InvalidArgument, "Filesystem path can contain only dots, colons, slashes, letters, 
digits, underscores and dashes.") + } + return nil } @@ -143,7 +163,7 @@ func FindBackupLocations(q *reform.Querier) ([]*BackupLocation, error) { locations := make([]*BackupLocation, len(rows)) for i, s := range rows { - locations[i] = s.(*BackupLocation) + locations[i] = s.(*BackupLocation) //nolint:forcetypeassert } return locations, nil @@ -156,14 +176,15 @@ func FindBackupLocationByID(q *reform.Querier, id string) (*BackupLocation, erro } location := &BackupLocation{ID: id} - switch err := q.Reload(location); err { - case nil: - return location, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Backup location with ID %q not found.", id) - default: + err := q.Reload(location) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, errors.Wrapf(ErrNotFound, "backup location with ID %q", id) + } return nil, errors.WithStack(err) } + + return location, nil } // FindBackupLocationsByIDs finds backup locations by IDs. @@ -186,7 +207,7 @@ func FindBackupLocationsByIDs(q *reform.Querier, ids []string) (map[string]*Back locations := make(map[string]*BackupLocation, len(all)) for _, l := range all { - location := l.(*BackupLocation) + location := l.(*BackupLocation) //nolint:forcetypeassert locations[location.ID] = location } return locations, nil @@ -376,6 +397,7 @@ func RemoveBackupLocation(q *reform.Querier, id string, mode RemoveMode) error { } for _, a := range artifacts { + // TODO removing artifact this way is not correct. Should be done via calling "removal service". 
if err := DeleteArtifact(q, a.ID); err != nil { return err } diff --git a/managed/models/location_helpers_test.go b/managed/models/location_helpers_test.go index f145973cc8..032240a1d1 100644 --- a/managed/models/location_helpers_test.go +++ b/managed/models/location_helpers_test.go @@ -365,7 +365,7 @@ func TestCreateBackupLocationValidation(t *testing.T) { Name: "client-1", BackupLocationConfig: models.BackupLocationConfig{ FilesystemConfig: &models.FilesystemLocationConfig{ - Path: "/tmp", + Path: "/tmp:dir_.-123", }, }, }, @@ -383,6 +383,42 @@ func TestCreateBackupLocationValidation(t *testing.T) { }, errorMsg: "rpc error: code = InvalidArgument desc = PMM client config path field is empty.", }, + { + name: "client config - non-canonical", + params: models.CreateBackupLocationParams{ + Name: "client-3", + BackupLocationConfig: models.BackupLocationConfig{ + FilesystemConfig: &models.FilesystemLocationConfig{ + Path: "/some_directory/../../../root", + }, + }, + }, + errorMsg: "rpc error: code = InvalidArgument desc = Specified folder in non-canonical format, canonical would be: \"/root\".", + }, + { + name: "client config - not absolute path", + params: models.CreateBackupLocationParams{ + Name: "client-4", + BackupLocationConfig: models.BackupLocationConfig{ + FilesystemConfig: &models.FilesystemLocationConfig{ + Path: "../../../my_directory", + }, + }, + }, + errorMsg: "rpc error: code = InvalidArgument desc = Folder should be an absolute path (should contain leading slash).", + }, + { + name: "client config - not allowed symbols", + params: models.CreateBackupLocationParams{ + Name: "client-5", + BackupLocationConfig: models.BackupLocationConfig{ + FilesystemConfig: &models.FilesystemLocationConfig{ + Path: "/%my_directory", + }, + }, + }, + errorMsg: "rpc error: code = InvalidArgument desc = Filesystem path can contain only dots, colons, slashes, letters, digits, underscores and dashes.", + }, { name: "normal s3 config", params: 
models.CreateBackupLocationParams{ diff --git a/managed/models/models.go b/managed/models/models.go index f9937203bc..871e3766b3 100644 --- a/managed/models/models.go +++ b/managed/models/models.go @@ -180,6 +180,8 @@ func jsonScan(v, src interface{}) error { b = v case string: b = []byte(v) + case nil: + return nil default: return errors.Errorf("expected []byte or string, got %T (%q)", src, src) } diff --git a/managed/models/node_helpers.go b/managed/models/node_helpers.go index c381026f8b..638273634a 100644 --- a/managed/models/node_helpers.go +++ b/managed/models/node_helpers.go @@ -33,14 +33,15 @@ func checkUniqueNodeID(q *reform.Querier, id string) error { } node := &Node{NodeID: id} - switch err := q.Reload(node); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Node with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(node) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Node with ID %q already exists.", id) } func checkUniqueNodeName(q *reform.Querier, name string) error { @@ -49,14 +50,14 @@ func checkUniqueNodeName(q *reform.Querier, name string) error { } _, err := q.FindOneFrom(NodeTable, "node_name", name) - switch err { - case nil: - return status.Errorf(codes.AlreadyExists, "Node with name %q already exists.", name) - case reform.ErrNoRows: - return nil - default: + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Node with name %q already exists.", name) } // CheckUniqueNodeInstanceRegion checks for uniqueness of instance address and region. 
@@ -75,14 +76,14 @@ func CheckUniqueNodeInstanceRegion(q *reform.Querier, instance string, region *s var node Node err := q.SelectOneTo(&node, "WHERE address = $1 AND region = $2 LIMIT 1", instance, region) - switch err { - case nil: - return &node, status.Errorf(codes.AlreadyExists, "Node with instance %q and region %q already exists.", instance, *region) - case reform.ErrNoRows: - return nil, nil //nolint:nilnil - default: + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, nil //nolint:nilnil + } return nil, errors.WithStack(err) } + + return &node, status.Errorf(codes.AlreadyExists, "Node with instance %q and region %q already exists.", instance, *region) } // NodeFilters represents filters for nodes list. @@ -106,7 +107,7 @@ func FindNodes(q *reform.Querier, filters NodeFilters) ([]*Node, error) { nodes := make([]*Node, len(structs)) for i, s := range structs { - nodes[i] = s.(*Node) + nodes[i] = s.(*Node) //nolint:forcetypeassert } return nodes, nil @@ -119,14 +120,14 @@ func FindNodeByID(q *reform.Querier, id string) (*Node, error) { } node := &Node{NodeID: id} - switch err := q.Reload(node); err { - case nil: - return node, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Node with ID %q not found.", id) - default: + err := q.Reload(node) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Node with ID %q not found.", id) + } return nil, errors.WithStack(err) } + return node, nil } // FindNodesByIDs finds Nodes by IDs. 
@@ -148,7 +149,7 @@ func FindNodesByIDs(q *reform.Querier, ids []string) ([]*Node, error) { res := make([]*Node, len(structs)) for i, s := range structs { - res[i] = s.(*Node) + res[i] = s.(*Node) //nolint:forcetypeassert } return res, nil } @@ -160,14 +161,15 @@ func FindNodeByName(q *reform.Querier, name string) (*Node, error) { } var node Node - switch err := q.FindOneTo(&node, "node_name", name); err { - case nil: - return &node, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Node with name %q not found.", name) - default: + err := q.FindOneTo(&node, "node_name", name) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Node with name %q not found.", name) + } return nil, errors.WithStack(err) } + + return &node, nil } // CreateNodeParams contains parameters for creating Nodes. @@ -262,7 +264,7 @@ func RemoveNode(q *reform.Querier, id string, mode RemoveMode) error { return status.Errorf(codes.FailedPrecondition, "Node with ID %q has agents.", id) case RemoveCascade: for _, str := range structs { - agentID := str.(*Agent).AgentID + agentID := str.(*Agent).AgentID //nolint:forcetypeassert if _, err = RemoveAgent(q, agentID, RemoveCascade); err != nil { return err } @@ -283,7 +285,7 @@ func RemoveNode(q *reform.Querier, id string, mode RemoveMode) error { return status.Errorf(codes.FailedPrecondition, "Node with ID %q has pmm-agent.", id) case RemoveCascade: for _, str := range structs { - agentID := str.(*Agent).AgentID + agentID := str.(*Agent).AgentID //nolint:forcetypeassert if _, err = RemoveAgent(q, agentID, RemoveCascade); err != nil { return err } @@ -304,7 +306,7 @@ func RemoveNode(q *reform.Querier, id string, mode RemoveMode) error { return status.Errorf(codes.FailedPrecondition, "Node with ID %q has services.", id) case RemoveCascade: for _, str := range structs { - serviceID := str.(*Service).ServiceID + serviceID := str.(*Service).ServiceID //nolint:forcetypeassert if err 
= RemoveService(q, serviceID, RemoveCascade); err != nil { return err } diff --git a/managed/models/percona_sso_model_helpers.go b/managed/models/percona_sso_model_helpers.go index 4c81cf9ba9..e91935b49b 100644 --- a/managed/models/percona_sso_model_helpers.go +++ b/managed/models/percona_sso_model_helpers.go @@ -50,7 +50,7 @@ func GetPerconaSSODetails(ctx context.Context, q *reform.Querier) (*PerconaSSODe return nil, errors.Wrap(err, "failed to get Percona SSO Details") } - details := ssoDetails.(*PerconaSSODetails) + details := ssoDetails.(*PerconaSSODetails) //nolint:forcetypeassert if details.isAccessTokenExpired() { refreshedToken, err := details.refreshAndGetAccessToken(ctx, q) if err != nil { diff --git a/managed/models/restore_history_helpers.go b/managed/models/restore_history_helpers.go index f7a30cc1ce..8680f97341 100644 --- a/managed/models/restore_history_helpers.go +++ b/managed/models/restore_history_helpers.go @@ -77,7 +77,7 @@ func FindRestoreHistoryItems(q *reform.Querier, filters RestoreHistoryItemFilter items := make([]*RestoreHistoryItem, 0, len(rows)) for _, r := range rows { - items = append(items, r.(*RestoreHistoryItem)) + items = append(items, r.(*RestoreHistoryItem)) //nolint:forcetypeassert } return items, nil @@ -90,14 +90,15 @@ func FindRestoreHistoryItemByID(q *reform.Querier, id string) (*RestoreHistoryIt } item := &RestoreHistoryItem{ID: id} - switch err := q.Reload(item); err { - case nil: - return item, nil - case reform.ErrNoRows: - return nil, errors.Wrapf(ErrNotFound, "restore history item by id '%s'", id) - default: + err := q.Reload(item) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, errors.Wrapf(ErrNotFound, "restore history item by id '%s'", id) + } return nil, errors.WithStack(err) } + + return item, nil } // CreateRestoreHistoryItemParams are params for creating a new restore history item. 
diff --git a/managed/models/restore_history_model.go b/managed/models/restore_history_model.go index d294ff144f..fba2d166a4 100644 --- a/managed/models/restore_history_model.go +++ b/managed/models/restore_history_model.go @@ -46,6 +46,11 @@ func (rs RestoreStatus) Validate() error { return nil } +// Pointer returns a pointer to status value. +func (rs RestoreStatus) Pointer() *RestoreStatus { + return &rs +} + // RestoreHistoryItem represents a restore backup history. // //reform:restore_history diff --git a/managed/models/rule_helpers.go b/managed/models/rule_helpers.go index 9211516958..38620c1c8e 100644 --- a/managed/models/rule_helpers.go +++ b/managed/models/rule_helpers.go @@ -32,14 +32,15 @@ func checkUniqueRuleID(q *reform.Querier, id string) error { } rule := &Rule{ID: id} - switch err := q.Reload(rule); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Rule with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(rule) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Rule with ID %q already exists.", id) } // FindRules returns saved alert rules configuration. 
@@ -51,7 +52,7 @@ func FindRules(q *reform.Querier) ([]*Rule, error) { rules := make([]*Rule, len(rows)) for i, s := range rows { - rules[i] = s.(*Rule) + rules[i] = s.(*Rule) //nolint:forcetypeassert } return rules, nil @@ -66,7 +67,7 @@ func FindRulesOnPage(q *reform.Querier, pageIndex, pageSize int) ([]*Rule, error rules := make([]*Rule, len(rows)) for i, s := range rows { - rules[i] = s.(*Rule) + rules[i] = s.(*Rule) //nolint:forcetypeassert } return rules, nil @@ -89,14 +90,15 @@ func FindRuleByID(q *reform.Querier, id string) (*Rule, error) { } rule := &Rule{ID: id} - switch err := q.Reload(rule); err { - case nil: - return rule, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Rule with ID %q not found.", id) - default: + err := q.Reload(rule) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Rule with ID %q not found.", id) + } return nil, errors.WithStack(err) } + + return rule, nil } // CreateRuleParams are params for creating new Rule. diff --git a/managed/models/scheduled_task_models.go b/managed/models/scheduled_task_models.go index e233b8be74..4e928f9f84 100644 --- a/managed/models/scheduled_task_models.go +++ b/managed/models/scheduled_task_models.go @@ -69,6 +69,7 @@ type CommonBackupTaskData struct { Mode BackupMode `json:"mode"` Retries uint32 `json:"retries"` RetryInterval time.Duration `json:"retry_interval"` + Folder string `json:"folder"` } // MySQLBackupTaskData contains data for mysql backup task. @@ -88,28 +89,28 @@ func (c ScheduledTaskData) Value() (driver.Value, error) { return jsonValue(c) } func (c *ScheduledTaskData) Scan(src interface{}) error { return jsonScan(c, src) } // BeforeInsert implements reform.BeforeInserter interface. 
-func (r *ScheduledTask) BeforeInsert() error { +func (s *ScheduledTask) BeforeInsert() error { now := Now() - r.CreatedAt = now - r.UpdatedAt = now + s.CreatedAt = now + s.UpdatedAt = now return nil } // BeforeUpdate implements reform.BeforeUpdater interface. -func (r *ScheduledTask) BeforeUpdate() error { - r.UpdatedAt = Now() +func (s *ScheduledTask) BeforeUpdate() error { + s.UpdatedAt = Now() return nil } // AfterFind implements reform.AfterFinder interface. -func (r *ScheduledTask) AfterFind() error { - r.CreatedAt = r.CreatedAt.UTC() - r.UpdatedAt = r.UpdatedAt.UTC() - r.StartAt = r.StartAt.UTC() - r.NextRun = r.NextRun.UTC() - r.LastRun = r.LastRun.UTC() +func (s *ScheduledTask) AfterFind() error { + s.CreatedAt = s.CreatedAt.UTC() + s.UpdatedAt = s.UpdatedAt.UTC() + s.StartAt = s.StartAt.UTC() + s.NextRun = s.NextRun.UTC() + s.LastRun = s.LastRun.UTC() return nil } diff --git a/managed/models/scheduled_tasks_helpers.go b/managed/models/scheduled_tasks_helpers.go index b486d5b19f..f7c5f51bf0 100644 --- a/managed/models/scheduled_tasks_helpers.go +++ b/managed/models/scheduled_tasks_helpers.go @@ -35,14 +35,15 @@ func FindScheduledTaskByID(q *reform.Querier, id string) (*ScheduledTask, error) } res := &ScheduledTask{ID: id} - switch err := q.Reload(res); err { - case nil: - return res, nil - case reform.ErrNoRows: - return nil, errors.Wrapf(ErrNotFound, "couldn't get scheduled task with ID %q", id) - default: + err := q.Reload(res) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, errors.Wrapf(ErrNotFound, "couldn't get scheduled task with ID %q", id) + } return nil, errors.WithStack(err) } + + return res, nil } // ScheduledTasksFilter represents filters for scheduled tasks. 
@@ -130,7 +131,7 @@ func FindScheduledTasks(q *reform.Querier, filters ScheduledTasksFilter) ([]*Sch } tasks := make([]*ScheduledTask, len(structs)) for i, s := range structs { - tasks[i] = s.(*ScheduledTask) + tasks[i] = s.(*ScheduledTask) //nolint:forcetypeassert } return tasks, nil } @@ -209,7 +210,7 @@ type ChangeScheduledTaskParams struct { } // Validate checks if params for scheduled tasks are valid. -func (p ChangeScheduledTaskParams) Validate() error { +func (p *ChangeScheduledTaskParams) Validate() error { if p.CronExpression != nil { _, err := cron.ParseStandard(*p.CronExpression) if err != nil { @@ -299,14 +300,15 @@ func checkUniqueScheduledTaskID(q *reform.Querier, id string) error { } task := &ScheduledTask{ID: id} - switch err := q.Reload(task); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Scheduled task with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(task) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Scheduled task with ID %q already exists.", id) } func checkUniqueScheduledTaskName(q *reform.Querier, name string) error { @@ -335,5 +337,51 @@ func nameFromTaskData(taskType ScheduledTaskType, taskData *ScheduledTaskData) ( return "", status.Errorf(codes.InvalidArgument, "Unknown type: %s", taskType) } } - return "", nil + return "", errors.New("scheduled task name cannot be empty") +} + +// Retention returns how many backup artifacts should be stored for the task. +func (s *ScheduledTask) Retention() (uint32, error) { + data, err := s.CommonBackupData() + if err != nil { + return 0, err + } + return data.Retention, nil +} + +// Mode returns task backup mode. +func (s *ScheduledTask) Mode() (BackupMode, error) { + data, err := s.CommonBackupData() + if err != nil { + return "", err + } + return data.Mode, nil +} + +// LocationID returns task location. 
+func (s *ScheduledTask) LocationID() (string, error) { + data, err := s.CommonBackupData() + if err != nil { + return "", err + } + return data.LocationID, nil +} + +func (s *ScheduledTask) CommonBackupData() (*CommonBackupTaskData, error) { + if s.Data != nil { + switch s.Type { + case ScheduledMySQLBackupTask: + if s.Data.MySQLBackupTask != nil { + return &s.Data.MySQLBackupTask.CommonBackupTaskData, nil + } + case ScheduledMongoDBBackupTask: + if s.Data.MongoDBBackupTask != nil { + return &s.Data.MongoDBBackupTask.CommonBackupTaskData, nil + } + default: + return nil, errors.Errorf("invalid backup type %s of scheduled task %s", s.Type, s.ID) + } + } + + return nil, errors.Errorf("empty backup data of scheduled task %s", s.ID) } diff --git a/managed/models/service_helpers.go b/managed/models/service_helpers.go index 1c6c2dc8d2..a2b04f9336 100644 --- a/managed/models/service_helpers.go +++ b/managed/models/service_helpers.go @@ -38,26 +38,27 @@ func checkServiceUniqueID(q *reform.Querier, id string) error { } row := &Service{ServiceID: id} - switch err := q.Reload(row); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Service with ID %q already exists.", id) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(row) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Service with ID %q already exists.", id) } func checkServiceUniqueName(q *reform.Querier, name string) error { _, err := q.FindOneFrom(ServiceTable, "service_name", name) - switch err { - case nil: - return status.Errorf(codes.AlreadyExists, "Service with name %q already exists.", name) - case reform.ErrNoRows: - return nil - default: + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Service with name %q already exists.", name) } func validateDBConnectionOptions(socket, 
host *string, port *uint16) error { @@ -128,7 +129,7 @@ func FindServices(q *reform.Querier, filters ServiceFilters) ([]*Service, error) services := make([]*Service, len(structs)) for i, s := range structs { - services[i] = s.(*Service) + services[i] = s.(*Service) //nolint:forcetypeassert } return services, nil @@ -168,14 +169,15 @@ func FindServiceByID(q *reform.Querier, id string) (*Service, error) { } row := &Service{ServiceID: id} - switch err := q.Reload(row); err { - case nil: - return row, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Service with ID %q not found.", id) - default: + err := q.Reload(row) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Service with ID %q not found.", id) + } return nil, errors.WithStack(err) } + + return row, nil } // FindServicesByIDs finds Services by IDs. @@ -198,7 +200,7 @@ func FindServicesByIDs(q *reform.Querier, ids []string) (map[string]*Service, er services := make(map[string]*Service, len(all)) for _, s := range all { - service := s.(*Service) + service := s.(*Service) //nolint:forcetypeassert services[service.ServiceID] = service } @@ -213,14 +215,14 @@ func FindServiceByName(q *reform.Querier, name string) (*Service, error) { var service Service err := q.FindOneTo(&service, "service_name", name) - switch err { - case nil: - return &service, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Service with name %q not found.", name) - default: + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Service with name %q not found.", name) + } return nil, errors.WithStack(err) } + + return &service, nil } // AddDBMSServiceParams contains parameters for adding DBMS (MySQL, PostgreSQL, MongoDB, External) Services. 
diff --git a/managed/models/settings_helpers.go b/managed/models/settings_helpers.go index 0f55e4fc13..146c66bae4 100644 --- a/managed/models/settings_helpers.go +++ b/managed/models/settings_helpers.go @@ -398,7 +398,7 @@ func ValidateSettings(params *ChangeSettingsParams) error { //nolint:cyclop } if _, err := validators.ValidateMetricResolution(v.dur); err != nil { - switch err.(type) { + switch err.(type) { //nolint:errorlint case validators.DurationNotAllowedError: return errors.Errorf("%s: should be a natural number of seconds", v.fieldName) case validators.MinDurationError: @@ -423,7 +423,7 @@ func ValidateSettings(params *ChangeSettingsParams) error { //nolint:cyclop } if _, err := validators.ValidateSTTCheckInterval(v.dur); err != nil { - switch err.(type) { + switch err.(type) { //nolint:errorlint case validators.DurationNotAllowedError: return errors.Errorf("%s: should be a natural number of seconds", v.fieldName) case validators.MinDurationError: @@ -436,7 +436,7 @@ func ValidateSettings(params *ChangeSettingsParams) error { //nolint:cyclop if params.DataRetention != 0 { if _, err := validators.ValidateDataRetention(params.DataRetention); err != nil { - switch err.(type) { + switch err.(type) { //nolint:errorlint case validators.DurationNotAllowedError: return errors.New("data_retention: should be a natural number of days") case validators.MinDurationError: diff --git a/managed/models/software_version_helpers.go b/managed/models/software_version_helpers.go index f14cde770f..9d49e33b4c 100644 --- a/managed/models/software_version_helpers.go +++ b/managed/models/software_version_helpers.go @@ -163,14 +163,15 @@ func FindServiceSoftwareVersionsByServiceID(q *reform.Querier, serviceID string) } versions := &ServiceSoftwareVersions{ServiceID: serviceID} - switch err := q.Reload(versions); err { - case nil: - return versions, nil - case reform.ErrNoRows: - return nil, errors.Wrapf(ErrNotFound, "service software versions by service id '%s'", serviceID) - 
default: + err := q.Reload(versions) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, errors.Wrapf(ErrNotFound, "service software versions by service id '%s'", serviceID) + } return nil, errors.WithStack(err) } + + return versions, nil } // FindServicesSoftwareVersionsFilter represents a filter for finding service software versions. @@ -217,7 +218,7 @@ func FindServicesSoftwareVersions( versions := make([]*ServiceSoftwareVersions, len(structs)) for i, s := range structs { - versions[i] = s.(*ServiceSoftwareVersions) + versions[i] = s.(*ServiceSoftwareVersions) //nolint:forcetypeassert } return versions, nil diff --git a/managed/models/template_helpers.go b/managed/models/template_helpers.go index ffb5d39300..e89f749f2d 100644 --- a/managed/models/template_helpers.go +++ b/managed/models/template_helpers.go @@ -32,14 +32,15 @@ func checkUniqueTemplateName(q *reform.Querier, name string) error { } template := &Template{Name: name} - switch err := q.Reload(template); err { - case nil: - return status.Errorf(codes.AlreadyExists, "Template with name %q already exists.", name) - case reform.ErrNoRows: - return nil - default: + err := q.Reload(template) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil + } return errors.WithStack(err) } + + return status.Errorf(codes.AlreadyExists, "Template with name %q already exists.", name) } // FindTemplates returns saved notification rule templates. 
@@ -51,7 +52,7 @@ func FindTemplates(q *reform.Querier) ([]Template, error) { templates := make([]Template, len(structs)) for i, s := range structs { - c := s.(*Template) + c := s.(*Template) //nolint:forcetypeassert templates[i] = *c } @@ -66,14 +67,15 @@ func FindTemplateByName(q *reform.Querier, name string) (*Template, error) { } template := &Template{Name: name} - switch err := q.Reload(template); err { - case nil: - return template, nil - case reform.ErrNoRows: - return nil, status.Errorf(codes.NotFound, "Template with name %q not found.", name) - default: + err := q.Reload(template) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, status.Errorf(codes.NotFound, "Template with name %q not found.", name) + } return nil, errors.WithStack(err) } + + return template, nil } // CreateTemplateParams are params for creating new rule template. diff --git a/managed/models/user_flags_helpers.go b/managed/models/user_flags_helpers.go index 3910e3cccd..7d7b9fc969 100644 --- a/managed/models/user_flags_helpers.go +++ b/managed/models/user_flags_helpers.go @@ -71,7 +71,7 @@ func CreateUser(q *reform.Querier, params *CreateUserParams) (*UserDetails, erro // Check user ID is unique row := &UserDetails{ID: params.UserID} err := q.Reload(row) - switch err { + switch err { //nolint:errorlint case nil: return nil, ErrUserAlreadyExists case reform.ErrNoRows: @@ -118,12 +118,13 @@ func FindUser(q *reform.Querier, userID int) (*UserDetails, error) { } row := &UserDetails{ID: userID} - switch err := q.Reload(row); err { - case nil: - return row, nil - case reform.ErrNoRows: - return nil, ErrNotFound - default: + err := q.Reload(row) + if err != nil { + if errors.Is(err, reform.ErrNoRows) { + return nil, ErrNotFound + } return nil, errors.WithStack(err) } + + return row, nil } diff --git a/managed/services/agents/connection_checker.go b/managed/services/agents/connection_checker.go index 201eb31573..3dc91d6f0f 100644 --- 
a/managed/services/agents/connection_checker.go +++ b/managed/services/agents/connection_checker.go @@ -99,7 +99,7 @@ func (c *ConnectionChecker) CheckConnectionToService(ctx context.Context, q *ref switch service.ServiceType { case models.MySQLServiceType: - tableCount := resp.(*agentpb.CheckConnectionResponse).GetStats().GetTableCount() + tableCount := resp.(*agentpb.CheckConnectionResponse).GetStats().GetTableCount() //nolint:forcetypeassert agent.TableCount = &tableCount l.Debugf("Updating table count: %d.", tableCount) if err = q.Update(agent); err != nil { @@ -115,7 +115,7 @@ func (c *ConnectionChecker) CheckConnectionToService(ctx context.Context, q *ref return errors.Errorf("unhandled Service type %s", service.ServiceType) } - msg := resp.(*agentpb.CheckConnectionResponse).Error + msg := resp.(*agentpb.CheckConnectionResponse).Error //nolint:forcetypeassert switch msg { case "": return nil @@ -202,11 +202,11 @@ func connectionRequest(q *reform.Querier, service *models.Service, agent *models func isExternalExporterConnectionCheckSupported(q *reform.Querier, pmmAgentID string) (bool, error) { pmmAgent, err := models.FindAgentByID(q, pmmAgentID) if err != nil { - return false, fmt.Errorf("failed to get PMM Agent: %s", err) + return false, fmt.Errorf("failed to get PMM Agent: %w", err) } pmmAgentVersion, err := version.Parse(*pmmAgent.Version) if err != nil { - return false, fmt.Errorf("failed to parse PMM agent version %q: %s", *pmmAgent.Version, err) + return false, fmt.Errorf("failed to parse PMM agent version %q: %w", *pmmAgent.Version, err) } if pmmAgentVersion.Less(checkExternalExporterConnectionPMMVersion) { diff --git a/managed/services/agents/deps.go b/managed/services/agents/deps.go index b540e6f915..52256d35ad 100644 --- a/managed/services/agents/deps.go +++ b/managed/services/agents/deps.go @@ -45,7 +45,7 @@ type qanClient interface { // retentionService is a subset of methods of backup.Client used by this package. 
// We use it instead of real type to avoid dependency cycle. type retentionService interface { - EnforceRetention(ctx context.Context, scheduleID string) error + EnforceRetention(scheduleID string) error } // jobsService is a subset of methods of agents.JobsService used by this package. diff --git a/managed/services/agents/jobs.go b/managed/services/agents/jobs.go index 82b44fa62d..d1cbdafb84 100644 --- a/managed/services/agents/jobs.go +++ b/managed/services/agents/jobs.go @@ -149,12 +149,12 @@ func (s *JobsService) RestartJob(ctx context.Context, jobID string) error { switch job.Type { case models.MySQLBackupJob: - if err := s.StartMySQLBackupJob(job.ID, job.PMMAgentID, job.Timeout, artifact.Name, dbConfig, locationConfig); err != nil { + if err := s.StartMySQLBackupJob(job.ID, job.PMMAgentID, job.Timeout, artifact.Name, dbConfig, locationConfig, artifact.Folder); err != nil { return errors.WithStack(err) } case models.MongoDBBackupJob: if err := s.StartMongoDBBackupJob(job.ID, job.PMMAgentID, job.Timeout, artifact.Name, dbConfig, - job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig); err != nil { + job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig, artifact.Folder); err != nil { return errors.WithStack(err) } case models.MySQLRestoreBackupJob: @@ -187,7 +187,8 @@ func (s *JobsService) handleJobResult(_ context.Context, l *logrus.Entry, result t.Querier, job.Data.MySQLBackup.ArtifactID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.SuccessBackupStatus), + Status: models.SuccessBackupStatus.Pointer(), + Metadata: artifactMetadataFromProto(result.MysqlBackup.Metadata), }) if err != nil { return err @@ -201,11 +202,15 @@ func (s *JobsService) handleJobResult(_ context.Context, l *logrus.Entry, result return errors.Errorf("result type %s doesn't match job type %s", models.MongoDBBackupJob, job.Type) } + metadata := artifactMetadataFromProto(result.MongodbBackup.Metadata) + artifact, err 
:= models.UpdateArtifact( t.Querier, job.Data.MongoDBBackup.ArtifactID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.SuccessBackupStatus), + Status: models.SuccessBackupStatus.Pointer(), + IsShardedCluster: result.MongodbBackup.IsShardedCluster, + Metadata: metadata, }) if err != nil { return err @@ -214,6 +219,31 @@ func (s *JobsService) handleJobResult(_ context.Context, l *logrus.Entry, result if artifact.Type == models.ScheduledArtifactType { scheduleID = artifact.ScheduleID } + + // If task was running by an old agent. Hacky code to support artifacts created on new server and old agent. + if metadata == nil && artifact.Mode == models.PITR && artifact.Folder != artifact.Name { + artifact, err := models.UpdateArtifact(t.Querier, artifact.ID, models.UpdateArtifactParams{Folder: &artifact.Name}) + if err != nil { + return errors.Wrapf(err, "failed to update artifact %s", artifact.ID) + } + + task, err := models.FindScheduledTaskByID(t.Querier, scheduleID) + if err != nil { + return errors.Wrapf(err, "cannot get scheduled task %s", scheduleID) + } + taskData := task.Data + taskData.MongoDBBackupTask.CommonBackupTaskData.Folder = artifact.Name + + params := models.ChangeScheduledTaskParams{ + Data: taskData, + } + + _, err = models.ChangeScheduledTask(t.Querier, scheduleID, params) + if err != nil { + return errors.Wrapf(err, "failed to update scheduled task %s", scheduleID) + } + } + case *agentpb.JobResult_MysqlRestoreBackup: if job.Type != models.MySQLRestoreBackupJob { return errors.Errorf("result type %s doesn't match job type %s", models.MySQLRestoreBackupJob, job.Type) @@ -276,7 +306,7 @@ func (s *JobsService) handleJobResult(_ context.Context, l *logrus.Entry, result if scheduleID != "" { go func() { - if err := s.retentionService.EnforceRetention(context.Background(), scheduleID); err != nil { + if err := s.retentionService.EnforceRetention(scheduleID); err != nil { l.Errorf("failed to enforce retention: %v", err) } }() @@ 
-288,11 +318,11 @@ func (s *JobsService) handleJobError(job *models.Job) error { switch job.Type { case models.MySQLBackupJob: _, err = models.UpdateArtifact(s.db.Querier, job.Data.MySQLBackup.ArtifactID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.ErrorBackupStatus), + Status: models.ErrorBackupStatus.Pointer(), }) case models.MongoDBBackupJob: _, err = models.UpdateArtifact(s.db.Querier, job.Data.MongoDBBackup.ArtifactID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.ErrorBackupStatus), + Status: models.ErrorBackupStatus.Pointer(), }) case models.MySQLRestoreBackupJob: _, err = models.ChangeRestoreHistoryItem( @@ -339,7 +369,7 @@ func (s *JobsService) handleJobProgress(_ context.Context, progress *agentpb.Job } // StartMySQLBackupJob starts mysql backup job on the pmm-agent. -func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig) error { //nolint:lll +func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string) error { //nolint:lll if err := PMMAgentSupported(s.r.db.Querier, pmmAgentID, "mysql backup", pmmAgentMinVersionForMySQLBackupAndRestore); err != nil { return err @@ -352,6 +382,7 @@ func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time Address: dbConfig.Address, Port: int32(dbConfig.Port), Socket: dbConfig.Socket, + Folder: folder, } switch { @@ -379,7 +410,7 @@ func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time if err != nil { return err } - if e := resp.(*agentpb.StartJobResponse).Error; e != "" { + if e := resp.(*agentpb.StartJobResponse).Error; e != "" { //nolint:forcetypeassert return errors.Errorf("failed to start MySQL backup job: %s", e) } @@ -396,6 +427,7 @@ func (s *JobsService) 
StartMongoDBBackupJob( mode models.BackupMode, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, + folder string, ) error { var err error switch dataModel { @@ -420,6 +452,7 @@ func (s *JobsService) StartMongoDBBackupJob( Port: int32(dbConfig.Port), Socket: dbConfig.Socket, EnablePitr: mode == models.PITR, + Folder: folder, } if mongoDBReq.DataModel, err = convertDataModel(dataModel); err != nil { return err @@ -459,7 +492,7 @@ func (s *JobsService) StartMongoDBBackupJob( if err != nil { return err } - if e := resp.(*agentpb.StartJobResponse).Error; e != "" { + if e := resp.(*agentpb.StartJobResponse).Error; e != "" { //nolint:forcetypeassert return errors.Errorf("failed to start MongoDB backup job: %s", e) } @@ -474,6 +507,7 @@ func (s *JobsService) StartMySQLRestoreBackupJob( timeout time.Duration, name string, locationConfig *models.BackupLocationConfig, + folder string, ) error { if err := PMMAgentSupported(s.r.db.Querier, pmmAgentID, "mysql restore", pmmAgentMinVersionForMySQLBackupAndRestore); err != nil { @@ -491,6 +525,7 @@ func (s *JobsService) StartMySQLRestoreBackupJob( MysqlRestoreBackup: &agentpb.StartJobRequest_MySQLRestoreBackup{ ServiceId: serviceID, Name: name, + Folder: folder, LocationConfig: &agentpb.StartJobRequest_MySQLRestoreBackup_S3Config{ S3Config: convertS3ConfigModel(locationConfig.S3Config), }, @@ -507,7 +542,7 @@ func (s *JobsService) StartMySQLRestoreBackupJob( if err != nil { return err } - if e := resp.(*agentpb.StartJobResponse).Error; e != "" { + if e := resp.(*agentpb.StartJobResponse).Error; e != "" { //nolint:forcetypeassert return errors.Errorf("failed to start MySQL restore backup job: %s", e) } @@ -520,10 +555,12 @@ func (s *JobsService) StartMongoDBRestoreBackupJob( pmmAgentID string, timeout time.Duration, name string, + pbmBackupName string, dbConfig *models.DBConfig, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, + folder string, ) error { var err 
error switch dataModel { @@ -557,6 +594,8 @@ func (s *JobsService) StartMongoDBRestoreBackupJob( Port: int32(dbConfig.Port), Socket: dbConfig.Socket, PitrTimestamp: timestamppb.New(pitrTimestamp), + Folder: folder, + PbmMetadata: &backuppb.PbmMetadata{Name: pbmBackupName}, } switch { @@ -594,7 +633,7 @@ func (s *JobsService) StartMongoDBRestoreBackupJob( if err != nil { return err } - if e := resp.(*agentpb.StartJobResponse).Error; e != "" { + if e := resp.(*agentpb.StartJobResponse).Error; e != "" { //nolint:forcetypeassert return errors.Errorf("failed to start MonogDB restore backup job: %s", e) } @@ -740,3 +779,35 @@ func createJobLog(querier *reform.Querier, jobID, data string, chunkID int, last }) return err } + +// artifactMetadataFromProto returns artifact metadata converted from protobuf to Go model format. +func artifactMetadataFromProto(metadata *backuppb.Metadata) *models.Metadata { + if metadata == nil { + return nil + } + + files := make([]models.File, len(metadata.FileList)) + for i, file := range metadata.FileList { + files[i] = models.File{Name: file.Name, IsDirectory: file.IsDirectory} + } + + var res models.Metadata + + res.FileList = files + + if metadata.RestoreTo != nil { + t := metadata.RestoreTo.AsTime() + res.RestoreTo = &t + } + + if metadata.BackupToolMetadata != nil { + switch toolType := metadata.BackupToolMetadata.(type) { + case *backuppb.Metadata_PbmMetadata: + res.BackupToolData = &models.BackupToolData{PbmMetadata: &models.PbmMetadata{Name: toolType.PbmMetadata.Name}} + default: + // Do nothing. 
+ } + } + + return &res +} diff --git a/managed/services/agents/jobs_test.go b/managed/services/agents/jobs_test.go new file mode 100644 index 0000000000..e6788e581d --- /dev/null +++ b/managed/services/agents/jobs_test.go @@ -0,0 +1,68 @@ +// Copyright (C) 2017 Percona LLC +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package agents + +import ( + "testing" + "time" + + "github.com/AlekSi/pointer" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" + + backuppb "github.com/percona/pmm/api/managementpb/backup" + "github.com/percona/pmm/managed/models" +) + +func TestArtifactMetadataFromProto(t *testing.T) { + t.Run("all fields are filled", func(t *testing.T) { + protoMetadata := backuppb.Metadata{ + FileList: []*backuppb.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: "file2"}}, + RestoreTo: ×tamppb.Timestamp{Seconds: 123, Nanos: 456}, + BackupToolMetadata: &backuppb.Metadata_PbmMetadata{PbmMetadata: &backuppb.PbmMetadata{Name: "some name"}}, + } + + expected := &models.Metadata{ + FileList: []models.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: "file2"}}, + RestoreTo: pointer.ToTime(time.Unix(123, 456).UTC()), + BackupToolData: &models.BackupToolData{PbmMetadata: &models.PbmMetadata{Name: "some name"}}, + } + + actual := artifactMetadataFromProto(&protoMetadata) + assert.Equal(t, expected, 
actual) + }) + + t.Run("some fields are empty", func(t *testing.T) { + protoMetadata := backuppb.Metadata{ + FileList: []*backuppb.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: "file2"}}, + } + + expected := &models.Metadata{ + FileList: []models.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: "file2"}}, + } + + actual := artifactMetadataFromProto(&protoMetadata) + assert.Equal(t, expected, actual) + }) + + t.Run("argument is nil", func(t *testing.T) { + var protoMetadata *backuppb.Metadata + var expected *models.Metadata + + actual := artifactMetadataFromProto(protoMetadata) + assert.Equal(t, expected, actual) + }) +} diff --git a/managed/services/agents/registry.go b/managed/services/agents/registry.go index 84691cb739..514d92c2bc 100644 --- a/managed/services/agents/registry.go +++ b/managed/services/agents/registry.go @@ -286,7 +286,7 @@ func (r *Registry) ping(ctx context.Context, agent *pmmAgentInfo) error { return nil } roundtrip := time.Since(start) - agentTime := resp.(*agentpb.Pong).CurrentTime.AsTime() + agentTime := resp.(*agentpb.Pong).CurrentTime.AsTime() //nolint:forcetypeassert clockDrift := agentTime.Sub(start) - roundtrip/2 if clockDrift < 0 { clockDrift = -clockDrift diff --git a/managed/services/agents/roster.go b/managed/services/agents/roster.go index a914737c93..c42a2fe89c 100644 --- a/managed/services/agents/roster.go +++ b/managed/services/agents/roster.go @@ -46,7 +46,7 @@ func newRoster() *roster { } } -func (r *roster) add(pmmAgentID string, group agentGroup, agentIDs []string) (groupID string) { +func (r *roster) add(pmmAgentID string, group agentGroup, agentIDs []string) (groupID string) { //nolint:nonamedreturns r.rw.Lock() defer r.rw.Unlock() @@ -56,7 +56,7 @@ func (r *roster) add(pmmAgentID string, group agentGroup, agentIDs []string) (gr return } -func (r *roster) get(groupID string) (agentIDs []string) { +func (r *roster) get(groupID string) (agentIDs []string) { //nolint:nonamedreturns 
r.rw.RLock() defer r.rw.RUnlock() diff --git a/managed/services/agents/versioner.go b/managed/services/agents/versioner.go index f62290f53b..d91709128f 100644 --- a/managed/services/agents/versioner.go +++ b/managed/services/agents/versioner.go @@ -162,7 +162,7 @@ func (s *VersionerService) GetVersions(pmmAgentID string, softwareList []Softwar return nil, errors.WithStack(err) } - versionsResponse := response.(*agentpb.GetVersionsResponse).Versions + versionsResponse := response.(*agentpb.GetVersionsResponse).Versions //nolint:forcetypeassert if len(versionsResponse) != len(softwareRequest) { return nil, errors.Errorf("response and request slice length mismatch %d != %d", len(versionsResponse), len(softwareRequest)) diff --git a/managed/services/backup/backup_service.go b/managed/services/backup/backup_service.go index 6b28bec44c..ba04cec7e7 100644 --- a/managed/services/backup/backup_service.go +++ b/managed/services/backup/backup_service.go @@ -41,18 +41,18 @@ type Service struct { jobsService jobsService agentService agentService compatibilityService compatibilityService - pitrTimerangeService pitrTimerangeService + pbmPITRService pbmPITRService } // NewService creates new backups logic service. -func NewService(db *reform.DB, jobsService jobsService, agentService agentService, cSvc compatibilityService, pitrSvc pitrTimerangeService) *Service { +func NewService(db *reform.DB, jobsService jobsService, agentService agentService, cSvc compatibilityService, pbmPITRService pbmPITRService) *Service { return &Service{ l: logrus.WithField("component", "management/backup/backup"), db: db, jobsService: jobsService, agentService: agentService, compatibilityService: cSvc, - pitrTimerangeService: pitrSvc, + pbmPITRService: pbmPITRService, } } @@ -66,6 +66,7 @@ type PerformBackupParams struct { Mode models.BackupMode Retries uint32 RetryInterval time.Duration + Folder string } // PerformBackup starts on-demand backup. 
@@ -161,12 +162,13 @@ func (s *Service) PerformBackup(ctx context.Context, params PerformBackupParams) Mode: params.Mode, Status: models.PendingBackupStatus, ScheduleID: params.ScheduleID, + Folder: params.Folder, }); err != nil { return err } } else { if artifact, err = models.UpdateArtifact(tx.Querier, artifact.ID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.PendingBackupStatus), + Status: models.PendingBackupStatus.Pointer(), }); err != nil { return err } @@ -208,10 +210,10 @@ func (s *Service) PerformBackup(ctx context.Context, params PerformBackupParams) switch svc.ServiceType { case models.MySQLServiceType: - err = s.jobsService.StartMySQLBackupJob(job.ID, job.PMMAgentID, 0, name, dbConfig, locationConfig) + err = s.jobsService.StartMySQLBackupJob(job.ID, job.PMMAgentID, 0, name, dbConfig, locationConfig, params.Folder) case models.MongoDBServiceType: err = s.jobsService.StartMongoDBBackupJob(job.ID, job.PMMAgentID, 0, name, dbConfig, - job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig) + job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig, params.Folder) case models.PostgreSQLServiceType, models.ProxySQLServiceType, models.HAProxyServiceType, @@ -224,7 +226,7 @@ func (s *Service) PerformBackup(ctx context.Context, params PerformBackupParams) var target *agents.AgentNotSupportedError if errors.As(err, &target) { _, dbErr := models.UpdateArtifact(s.db.Querier, artifact.ID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.ErrorBackupStatus), + Status: models.ErrorBackupStatus.Pointer(), }) if dbErr != nil { @@ -243,11 +245,13 @@ type restoreJobParams struct { ServiceID string AgentID string ArtifactName string + pbmBackupName string LocationModel *models.BackupLocation ServiceType models.ServiceType DBConfig *models.DBConfig DataModel models.DataModel PITRTimestamp time.Time + Folder string } // RestoreBackup starts restore backup job. 
@@ -346,6 +350,13 @@ func (s *Service) RestoreBackup(ctx context.Context, serviceID, artifactID strin return err } + var artifactFolder string + + // Only artifacts taken with new agents can be restored from a folder. + if len(artifact.MetadataList) != 0 { + artifactFolder = artifact.Folder + } + params = restoreJobParams{ JobID: job.ID, ServiceID: serviceID, @@ -356,6 +367,13 @@ func (s *Service) RestoreBackup(ctx context.Context, serviceID, artifactID strin DBConfig: dbConfig, DataModel: artifact.DataModel, PITRTimestamp: pitrTimestamp, + Folder: artifactFolder, + } + + if len(artifact.MetadataList) != 0 && + artifact.MetadataList[0].BackupToolData != nil && + artifact.MetadataList[0].BackupToolData.PbmMetadata != nil { + params.pbmBackupName = artifact.MetadataList[0].BackupToolData.PbmMetadata.Name } return nil @@ -429,17 +447,20 @@ func (s *Service) startRestoreJob(params *restoreJobParams) error { params.ServiceID, // TODO: It seems that this parameter is redundant 0, params.ArtifactName, - locationConfig) + locationConfig, + params.Folder) case models.MongoDBServiceType: return s.jobsService.StartMongoDBRestoreBackupJob( params.JobID, params.AgentID, 0, params.ArtifactName, + params.pbmBackupName, params.DBConfig, params.DataModel, locationConfig, - params.PITRTimestamp) + params.PITRTimestamp, + params.Folder) case models.PostgreSQLServiceType, models.ProxySQLServiceType, models.HAProxyServiceType, @@ -524,6 +545,12 @@ func (s *Service) checkArtifactModePreconditions(ctx context.Context, artifactID return errors.Wrapf(ErrArtifactNotReady, "artifact %q in status: %q", artifactID, artifact.Status) } + if artifact.IsShardedCluster { + return errors.Wrapf(ErrIncompatibleService, + "artifact %q was made for a sharded cluster and cannot be restored from UI; for more information refer to "+ + "https://docs.percona.com/percona-monitoring-and-management/get-started/backup/backup_mongo.html", artifactID) + } + if err := checkArtifactMode(artifact, pitrTimestamp); err 
!= nil { return err } @@ -542,7 +569,8 @@ func (s *Service) checkArtifactModePreconditions(ctx context.Context, artifactID return errors.Wrapf(ErrIncompatibleLocationType, "point in time recovery available only for S3 locations") } - timeRanges, err := s.pitrTimerangeService.ListPITRTimeranges(ctx, artifact.Name, location) + storage := GetStorageForLocation(location) + timeRanges, err := s.pbmPITRService.ListPITRTimeranges(ctx, storage, location, artifact) if err != nil { return err } diff --git a/managed/services/backup/backup_service_test.go b/managed/services/backup/backup_service_test.go index 7a03234201..b2062d9c66 100644 --- a/managed/services/backup/backup_service_test.go +++ b/managed/services/backup/backup_service_test.go @@ -81,6 +81,8 @@ func TestPerformBackup(t *testing.T) { mockedCompatibilityService := &mockCompatibilityService{} backupService := NewService(db, mockedJobsService, mockedAgentService, mockedCompatibilityService, nil) + artifactFolder := "artifact_folder" + s3Location, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ Name: "Test s3 location", Description: "Test s3 description", @@ -101,7 +103,7 @@ func TestPerformBackup(t *testing.T) { Description: "Test local description", BackupLocationConfig: models.BackupLocationConfig{ FilesystemConfig: &models.FilesystemLocationConfig{ - Path: "/opt/data/", + Path: "/opt/data", }, }, }) @@ -162,7 +164,7 @@ func TestPerformBackup(t *testing.T) { S3Config: tc.locationModel.S3Config, } mockedJobsService.On("StartMySQLBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), time.Duration(0), - mock.Anything, mock.Anything, locationConfig).Return(nil).Once() + mock.Anything, mock.Anything, locationConfig, artifactFolder).Return(nil).Once() } artifactID, err := backupService.PerformBackup(ctx, PerformBackupParams{ @@ -171,6 +173,7 @@ func TestPerformBackup(t *testing.T) { Name: tc.name + "_" + "test_backup", DataModel: tc.dataModel, Mode: models.Snapshot, + Folder: 
artifactFolder, }) if tc.expectedError != nil { @@ -201,6 +204,7 @@ func TestPerformBackup(t *testing.T) { Name: "test_backup", DataModel: models.PhysicalDataModel, Mode: models.PITR, + Folder: artifactFolder, }) assert.ErrorIs(t, err, ErrIncompatibleDataModel) assert.Empty(t, artifactID) @@ -214,6 +218,7 @@ func TestPerformBackup(t *testing.T) { Name: "test_backup", DataModel: models.PhysicalDataModel, Mode: models.PITR, + Folder: artifactFolder, }) assert.ErrorContains(t, err, "Empty Service ID") assert.Empty(t, artifactID) @@ -228,6 +233,7 @@ func TestPerformBackup(t *testing.T) { Name: "test_backup", DataModel: models.PhysicalDataModel, Mode: models.Incremental, + Folder: artifactFolder, }) assert.ErrorContains(t, err, "the only supported backups mode for mongoDB is snapshot and PITR") assert.Empty(t, artifactID) @@ -251,6 +257,8 @@ func TestRestoreBackup(t *testing.T) { mockedCompatibilityService := &mockCompatibilityService{} backupService := NewService(db, mockedJobsService, mockedAgentService, mockedCompatibilityService, nil) + artifactFolder := "artifact_folder" + s3Location, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ Name: "Test location", Description: "Test description", @@ -271,7 +279,7 @@ func TestRestoreBackup(t *testing.T) { Description: "Test local description", BackupLocationConfig: models.BackupLocationConfig{ FilesystemConfig: &models.FilesystemLocationConfig{ - Path: "/opt/data/", + Path: "/opt/data", }, }, }) @@ -288,6 +296,12 @@ func TestRestoreBackup(t *testing.T) { DataModel: models.PhysicalDataModel, Mode: models.Snapshot, Status: models.SuccessBackupStatus, + Folder: artifactFolder, + }) + require.NoError(t, err) + + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{FileList: []models.File{{Name: "test_file_name"}}}, }) require.NoError(t, err) @@ -314,7 +328,7 @@ func TestRestoreBackup(t *testing.T) { if tc.expectedError == nil { 
mockedJobsService.On("StartMySQLRestoreBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), - pointer.GetString(agent.ServiceID), mock.Anything, artifact.Name, mock.Anything).Return(nil).Once() + pointer.GetString(agent.ServiceID), mock.Anything, artifact.Name, mock.Anything, artifactFolder).Return(nil).Once() } restoreID, err := backupService.RestoreBackup(ctx, pointer.GetString(agent.ServiceID), artifact.ID, time.Unix(0, 0)) if tc.expectedError != nil { @@ -329,7 +343,7 @@ func TestRestoreBackup(t *testing.T) { t.Run("artifact not ready", func(t *testing.T) { updatedArtifact, err := models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.PendingBackupStatus), + Status: models.PendingBackupStatus.Pointer(), }) require.NoError(t, err) require.NotNil(t, updatedArtifact) @@ -351,6 +365,12 @@ func TestRestoreBackup(t *testing.T) { DataModel: models.LogicalDataModel, Mode: models.Snapshot, Status: models.SuccessBackupStatus, + Folder: artifactFolder, + }) + require.NoError(t, err) + + artifactWithVersion, err = models.UpdateArtifact(db.Querier, artifactWithVersion.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{BackupToolData: &models.BackupToolData{PbmMetadata: &models.PbmMetadata{Name: "artifact_repr_name"}}}, }) require.NoError(t, err) @@ -402,8 +422,15 @@ func TestRestoreBackup(t *testing.T) { mockedCompatibilityService.On("CheckArtifactCompatibility", tc.artifact.ID, tc.dbVersion).Return(tc.expectedError).Once() if tc.expectedError == nil { - mockedJobsService.On("StartMongoDBRestoreBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), - time.Duration(0), tc.artifact.Name, mock.Anything, tc.artifact.DataModel, mock.Anything, time.Unix(0, 0)).Return(nil).Once() + if len(tc.artifact.MetadataList) != 0 && tc.artifact.MetadataList[0].BackupToolData != nil { + mockedJobsService.On("StartMongoDBRestoreBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), + 
time.Duration(0), tc.artifact.Name, tc.artifact.MetadataList[0].BackupToolData.PbmMetadata.Name, mock.Anything, tc.artifact.DataModel, + mock.Anything, time.Unix(0, 0), tc.artifact.Folder).Return(nil).Once() + } else { + mockedJobsService.On("StartMongoDBRestoreBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), + time.Duration(0), tc.artifact.Name, "", mock.Anything, tc.artifact.DataModel, + mock.Anything, time.Unix(0, 0), tc.artifact.Folder).Return(nil).Once() + } } restoreID, err := backupService.RestoreBackup(ctx, pointer.GetString(agent.ServiceID), tc.artifact.ID, time.Unix(0, 0)) if tc.expectedError != nil { @@ -463,8 +490,8 @@ func TestCheckArtifactModePreconditions(t *testing.T) { }) db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) - mockedPitrTimerangeService := &mockPitrTimerangeService{} - backupService := NewService(db, nil, nil, nil, mockedPitrTimerangeService) + mockedPbmPITRService := &mockPbmPITRService{} + backupService := NewService(db, nil, nil, nil, mockedPbmPITRService) locationRes, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ Name: "Test location", @@ -657,13 +684,28 @@ func TestCheckArtifactModePreconditions(t *testing.T) { }, err: nil, }, + { + name: "sharded cluster restore not supported", + pitrValue: time.Unix(0, 0), + artifactParams: models.CreateArtifactParams{ + Name: "mongo-artifact-name-7", + Vendor: string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + IsShardedCluster: true, + }, + err: ErrIncompatibleService, + }, } { t.Run(tc.name, func(t *testing.T) { artifact, err := models.CreateArtifact(db.Querier, tc.artifactParams) require.NoError(t, err) if tc.prepareMock { - mockedPitrTimerangeService.On("ListPITRTimeranges", ctx, artifact.Name, locationRes).Return(timelineList, nil).Once() + 
mockedPbmPITRService.On("ListPITRTimeranges", ctx, mock.Anything, locationRes, artifact).Return(timelineList, nil).Once() } err = backupService.checkArtifactModePreconditions(ctx, artifact.ID, tc.pitrValue) @@ -676,7 +718,7 @@ func TestCheckArtifactModePreconditions(t *testing.T) { } }) - mock.AssertExpectationsForObjects(t, mockedPitrTimerangeService) + mock.AssertExpectationsForObjects(t, mockedPbmPITRService) } func TestInTimeSpan(t *testing.T) { diff --git a/managed/services/backup/deps.go b/managed/services/backup/deps.go index dc9939ac67..c71535be13 100644 --- a/managed/services/backup/deps.go +++ b/managed/services/backup/deps.go @@ -25,13 +25,12 @@ import ( ) //go:generate ../../../bin/mockery -name=jobsService -case=snake -inpkg -testonly -//go:generate ../../../bin/mockery -name=s3 -case=snake -inpkg -testonly //go:generate ../../../bin/mockery -name=agentService -case=snake -inpkg -testonly //go:generate ../../../bin/mockery -name=versioner -case=snake -inpkg -testonly -//go:generate ../../../bin/mockery -name=pitrLocationClient -case=snake -inpkg -testonly //go:generate ../../../bin/mockery -name=compatibilityService -case=snake -inpkg -testonly -//go:generate ../../../bin/mockery -name=pitrLocationClient -case=snake -inpkg -testonly -//go:generate ../../../bin/mockery -name=pitrTimerangeService -case=snake -inpkg -testonly +//go:generate ../../../bin/mockery -name=pbmPITRService -case=snake -inpkg -testonly +//go:generate ../../../bin/mockery -name=Storage -case=snake -inpkg -testonly +//go:generate ../../../bin/mockery -name=removalService -case=snake -inpkg -testonly // jobsService is a subset of methods of agents.JobsService used by this package. // We use it instead of real type for testing and to avoid dependency cycle. 
@@ -44,6 +43,7 @@ type jobsService interface { name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, + folder string, ) error StartMySQLRestoreBackupJob( jobID string, @@ -52,6 +52,7 @@ type jobsService interface { timeout time.Duration, name string, locationConfig *models.BackupLocationConfig, + folder string, ) error StartMongoDBBackupJob( jobID string, @@ -62,25 +63,27 @@ type jobsService interface { mode models.BackupMode, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, + folder string, ) error StartMongoDBRestoreBackupJob( jobID string, pmmAgentID string, timeout time.Duration, name string, + pbmBackupName string, dbConfig *models.DBConfig, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, + folder string, ) error } -type s3 interface { - RemoveRecursive(ctx context.Context, endpoint, accessKey, secretKey, bucketName, prefix string) error -} - type removalService interface { - DeleteArtifact(ctx context.Context, artifactID string, removeFiles bool) error + // DeleteArtifact deletes specified artifact along with files if specified. + DeleteArtifact(storage Storage, artifactID string, removeFiles bool) error + // TrimPITRArtifact removes first N records from PITR artifact. Removes snapshots, PITR chunks and corresponding data from database. + TrimPITRArtifact(storage Storage, artifactID string, firstN int) error } // agentService is a subset of methods of agents.AgentService used by this package. @@ -103,17 +106,23 @@ type compatibilityService interface { CheckArtifactCompatibility(artifactID, targetDBVersion string) error } -type pitrLocationClient interface { +// pbmPITRService provides methods that help us inspect and manage PITR oplog slices. 
+type pbmPITRService interface { + // ListPITRTimeranges list the available PITR timeranges for the given artifact in the provided location + ListPITRTimeranges(ctx context.Context, locationClient Storage, location *models.BackupLocation, artifact *models.Artifact) ([]Timeline, error) + // GetPITRFiles returns list of PITR chunks. If 'until' specified, returns only chunks created before that date, otherwise returns all artifact chunks. + GetPITRFiles(ctx context.Context, locationClient Storage, location *models.BackupLocation, artifact *models.Artifact, until *time.Time) ([]*oplogChunk, error) +} + +type Storage interface { // FileStat returns file info. It returns error if file is empty or not exists. FileStat(ctx context.Context, endpoint, accessKey, secretKey, bucketName, name string) (minio.FileInfo, error) // List scans path with prefix and returns all files with given suffix. // Both prefix and suffix can be omitted. List(ctx context.Context, endpoint, accessKey, secretKey, bucketName, prefix, suffix string) ([]minio.FileInfo, error) -} - -// pitrTimerangeService provides methods that help us inspect PITR artifacts -type pitrTimerangeService interface { - // ListPITRTimeranges list the available PITR timeranges for the given artifact in the provided location - ListPITRTimeranges(ctx context.Context, artifactName string, location *models.BackupLocation) ([]Timeline, error) + // Remove removes single objects from storage. + Remove(ctx context.Context, endpoint, accessKey, secretKey, bucketName, objectName string) error + // RemoveRecursive removes objects recursively from storage with given prefix. 
+ RemoveRecursive(ctx context.Context, endpoint, accessKey, secretKey, bucketName, prefix string) (rerr error) } diff --git a/managed/services/backup/errors.go b/managed/services/backup/errors.go index d7079b05a7..f6b5d15c6b 100644 --- a/managed/services/backup/errors.go +++ b/managed/services/backup/errors.go @@ -46,4 +46,6 @@ var ( ErrAnotherOperationInProgress = errors.New("another operation in progress") // ErrArtifactNotReady is returned when artifact not ready to be restored, i.e. not in success status. ErrArtifactNotReady = errors.New("artifact not in success status") + // ErrIncorrectArtifactStatus is returned when artifact status doesn't fit to proceed with action. + ErrIncorrectArtifactStatus = errors.New("incorrect artifact status") ) diff --git a/managed/services/backup/mock_jobs_service_test.go b/managed/services/backup/mock_jobs_service_test.go index f9961b021d..8121ee32a3 100644 --- a/managed/services/backup/mock_jobs_service_test.go +++ b/managed/services/backup/mock_jobs_service_test.go @@ -15,13 +15,13 @@ type mockJobsService struct { mock.Mock } -// StartMongoDBBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, mode, dataModel, locationConfig -func (_m *mockJobsService) StartMongoDBBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, mode models.BackupMode, dataModel models.DataModel, locationConfig *models.BackupLocationConfig) error { - ret := _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, mode, dataModel, locationConfig) +// StartMongoDBBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, mode, dataModel, locationConfig, folder +func (_m *mockJobsService) StartMongoDBBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, mode models.BackupMode, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, folder string) error { + ret 
:= _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, mode, dataModel, locationConfig, folder) var r0 error - if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, *models.DBConfig, models.BackupMode, models.DataModel, *models.BackupLocationConfig) error); ok { - r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, mode, dataModel, locationConfig) + if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, *models.DBConfig, models.BackupMode, models.DataModel, *models.BackupLocationConfig, string) error); ok { + r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, mode, dataModel, locationConfig, folder) } else { r0 = ret.Error(0) } @@ -29,13 +29,13 @@ func (_m *mockJobsService) StartMongoDBBackupJob(jobID string, pmmAgentID string return r0 } -// StartMongoDBRestoreBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, dataModel, locationConfig, pitrTimestamp -func (_m *mockJobsService) StartMongoDBRestoreBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time) error { - ret := _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, dataModel, locationConfig, pitrTimestamp) +// StartMongoDBRestoreBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, pbmBackupName, dbConfig, dataModel, locationConfig, pitrTimestamp, folder +func (_m *mockJobsService) StartMongoDBRestoreBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, pbmBackupName string, dbConfig *models.DBConfig, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, folder string) error { + ret := _m.Called(jobID, pmmAgentID, timeout, name, pbmBackupName, dbConfig, dataModel, locationConfig, pitrTimestamp, folder) var r0 error - if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, 
*models.DBConfig, models.DataModel, *models.BackupLocationConfig, time.Time) error); ok { - r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, dataModel, locationConfig, pitrTimestamp) + if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, string, *models.DBConfig, models.DataModel, *models.BackupLocationConfig, time.Time, string) error); ok { + r0 = rf(jobID, pmmAgentID, timeout, name, pbmBackupName, dbConfig, dataModel, locationConfig, pitrTimestamp, folder) } else { r0 = ret.Error(0) } @@ -43,13 +43,13 @@ func (_m *mockJobsService) StartMongoDBRestoreBackupJob(jobID string, pmmAgentID return r0 } -// StartMySQLBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, locationConfig -func (_m *mockJobsService) StartMySQLBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig) error { - ret := _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig) +// StartMySQLBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder +func (_m *mockJobsService) StartMySQLBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string) error { + ret := _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder) var r0 error - if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, *models.DBConfig, *models.BackupLocationConfig) error); ok { - r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig) + if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, *models.DBConfig, *models.BackupLocationConfig, string) error); ok { + r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder) } else { r0 = ret.Error(0) } @@ -57,13 +57,13 @@ func (_m *mockJobsService) StartMySQLBackupJob(jobID 
string, pmmAgentID string, return r0 } -// StartMySQLRestoreBackupJob provides a mock function with given fields: jobID, pmmAgentID, serviceID, timeout, name, locationConfig -func (_m *mockJobsService) StartMySQLRestoreBackupJob(jobID string, pmmAgentID string, serviceID string, timeout time.Duration, name string, locationConfig *models.BackupLocationConfig) error { - ret := _m.Called(jobID, pmmAgentID, serviceID, timeout, name, locationConfig) +// StartMySQLRestoreBackupJob provides a mock function with given fields: jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder +func (_m *mockJobsService) StartMySQLRestoreBackupJob(jobID string, pmmAgentID string, serviceID string, timeout time.Duration, name string, locationConfig *models.BackupLocationConfig, folder string) error { + ret := _m.Called(jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder) var r0 error - if rf, ok := ret.Get(0).(func(string, string, string, time.Duration, string, *models.BackupLocationConfig) error); ok { - r0 = rf(jobID, pmmAgentID, serviceID, timeout, name, locationConfig) + if rf, ok := ret.Get(0).(func(string, string, string, time.Duration, string, *models.BackupLocationConfig, string) error); ok { + r0 = rf(jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder) } else { r0 = ret.Error(0) } diff --git a/managed/services/backup/mock_pbm_pitr_service_test.go b/managed/services/backup/mock_pbm_pitr_service_test.go new file mode 100644 index 0000000000..104259ebe8 --- /dev/null +++ b/managed/services/backup/mock_pbm_pitr_service_test.go @@ -0,0 +1,63 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package backup + +import ( + context "context" + time "time" + + mock "github.com/stretchr/testify/mock" + + models "github.com/percona/pmm/managed/models" +) + +// mockPbmPITRService is an autogenerated mock type for the pbmPITRService type +type mockPbmPITRService struct { + mock.Mock +} + +// GetPITRFiles provides a mock function with given fields: ctx, locationClient, location, artifact, until +func (_m *mockPbmPITRService) GetPITRFiles(ctx context.Context, locationClient Storage, location *models.BackupLocation, artifact *models.Artifact, until *time.Time) ([]*oplogChunk, error) { + ret := _m.Called(ctx, locationClient, location, artifact, until) + + var r0 []*oplogChunk + if rf, ok := ret.Get(0).(func(context.Context, Storage, *models.BackupLocation, *models.Artifact, *time.Time) []*oplogChunk); ok { + r0 = rf(ctx, locationClient, location, artifact, until) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*oplogChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, Storage, *models.BackupLocation, *models.Artifact, *time.Time) error); ok { + r1 = rf(ctx, locationClient, location, artifact, until) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListPITRTimeranges provides a mock function with given fields: ctx, locationClient, location, artifact +func (_m *mockPbmPITRService) ListPITRTimeranges(ctx context.Context, locationClient Storage, location *models.BackupLocation, artifact *models.Artifact) ([]Timeline, error) { + ret := _m.Called(ctx, locationClient, location, artifact) + + var r0 []Timeline + if rf, ok := ret.Get(0).(func(context.Context, Storage, *models.BackupLocation, *models.Artifact) []Timeline); ok { + r0 = rf(ctx, locationClient, location, artifact) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]Timeline) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, Storage, *models.BackupLocation, *models.Artifact) error); ok { + r1 = rf(ctx, locationClient, location, 
artifact) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/managed/services/backup/mock_pitr_timerange_service_test.go b/managed/services/backup/mock_pitr_timerange_service_test.go deleted file mode 100644 index d1558629d0..0000000000 --- a/managed/services/backup/mock_pitr_timerange_service_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package backup - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - - models "github.com/percona/pmm/managed/models" -) - -// mockPitrTimerangeService is an autogenerated mock type for the pitrTimerangeService type -type mockPitrTimerangeService struct { - mock.Mock -} - -// ListPITRTimeranges provides a mock function with given fields: ctx, artifactName, location -func (_m *mockPitrTimerangeService) ListPITRTimeranges(ctx context.Context, artifactName string, location *models.BackupLocation) ([]Timeline, error) { - ret := _m.Called(ctx, artifactName, location) - - var r0 []Timeline - if rf, ok := ret.Get(0).(func(context.Context, string, *models.BackupLocation) []Timeline); ok { - r0 = rf(ctx, artifactName, location) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]Timeline) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, *models.BackupLocation) error); ok { - r1 = rf(ctx, artifactName, location) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/managed/services/backup/mock_removal_service_test.go b/managed/services/backup/mock_removal_service_test.go new file mode 100644 index 0000000000..b7b7675a35 --- /dev/null +++ b/managed/services/backup/mock_removal_service_test.go @@ -0,0 +1,38 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package backup + +import mock "github.com/stretchr/testify/mock" + +// mockRemovalService is an autogenerated mock type for the removalService type +type mockRemovalService struct { + mock.Mock +} + +// DeleteArtifact provides a mock function with given fields: storage, artifactID, removeFiles +func (_m *mockRemovalService) DeleteArtifact(storage Storage, artifactID string, removeFiles bool) error { + ret := _m.Called(storage, artifactID, removeFiles) + + var r0 error + if rf, ok := ret.Get(0).(func(Storage, string, bool) error); ok { + r0 = rf(storage, artifactID, removeFiles) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TrimPITRArtifact provides a mock function with given fields: storage, artifactID, firstN +func (_m *mockRemovalService) TrimPITRArtifact(storage Storage, artifactID string, firstN int) error { + ret := _m.Called(storage, artifactID, firstN) + + var r0 error + if rf, ok := ret.Get(0).(func(Storage, string, int) error); ok { + r0 = rf(storage, artifactID, firstN) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/managed/services/backup/mock_s3_test.go b/managed/services/backup/mock_s3_test.go deleted file mode 100644 index d5c97f853d..0000000000 --- a/managed/services/backup/mock_s3_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package backup - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// mockS3 is an autogenerated mock type for the s3 type -type mockS3 struct { - mock.Mock -} - -// RemoveRecursive provides a mock function with given fields: ctx, endpoint, accessKey, secretKey, bucketName, prefix -func (_m *mockS3) RemoveRecursive(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, prefix string) error { - ret := _m.Called(ctx, endpoint, accessKey, secretKey, bucketName, prefix) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) error); ok { - r0 = rf(ctx, endpoint, accessKey, secretKey, bucketName, prefix) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/managed/services/backup/mock_pitr_location_client_test.go b/managed/services/backup/mock_storage_test.go similarity index 52% rename from managed/services/backup/mock_pitr_location_client_test.go rename to managed/services/backup/mock_storage_test.go index 82a68a377a..5de2ba4101 100644 --- a/managed/services/backup/mock_pitr_location_client_test.go +++ b/managed/services/backup/mock_storage_test.go @@ -10,13 +10,13 @@ import ( minio "github.com/percona/pmm/managed/services/minio" ) -// mockPitrLocationClient is an autogenerated mock type for the pitrLocationClient type -type mockPitrLocationClient struct { +// MockStorage is an autogenerated mock type for the Storage type +type MockStorage struct { mock.Mock } // FileStat provides a mock function with given fields: ctx, endpoint, accessKey, secretKey, bucketName, name -func (_m *mockPitrLocationClient) FileStat(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, name string) (minio.FileInfo, error) { +func (_m *MockStorage) FileStat(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, name string) (minio.FileInfo, error) { ret := _m.Called(ctx, endpoint, 
accessKey, secretKey, bucketName, name) var r0 minio.FileInfo @@ -37,7 +37,7 @@ func (_m *mockPitrLocationClient) FileStat(ctx context.Context, endpoint string, } // List provides a mock function with given fields: ctx, endpoint, accessKey, secretKey, bucketName, prefix, suffix -func (_m *mockPitrLocationClient) List(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, prefix string, suffix string) ([]minio.FileInfo, error) { +func (_m *MockStorage) List(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, prefix string, suffix string) ([]minio.FileInfo, error) { ret := _m.Called(ctx, endpoint, accessKey, secretKey, bucketName, prefix, suffix) var r0 []minio.FileInfo @@ -58,3 +58,31 @@ func (_m *mockPitrLocationClient) List(ctx context.Context, endpoint string, acc return r0, r1 } + +// Remove provides a mock function with given fields: ctx, endpoint, accessKey, secretKey, bucketName, objectName +func (_m *MockStorage) Remove(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, objectName string) error { + ret := _m.Called(ctx, endpoint, accessKey, secretKey, bucketName, objectName) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) error); ok { + r0 = rf(ctx, endpoint, accessKey, secretKey, bucketName, objectName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveRecursive provides a mock function with given fields: ctx, endpoint, accessKey, secretKey, bucketName, prefix +func (_m *MockStorage) RemoveRecursive(ctx context.Context, endpoint string, accessKey string, secretKey string, bucketName string, prefix string) error { + ret := _m.Called(ctx, endpoint, accessKey, secretKey, bucketName, prefix) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) error); ok { + r0 = rf(ctx, endpoint, accessKey, secretKey, bucketName, 
prefix) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/managed/services/backup/pitr_timerange_service.go b/managed/services/backup/pitr_timerange_service.go index 4c5c804bac..cc82678d6d 100644 --- a/managed/services/backup/pitr_timerange_service.go +++ b/managed/services/backup/pitr_timerange_service.go @@ -37,17 +37,15 @@ const ( var errUnsupportedLocation = errors.New("unsupported location config") -// PITRTimerangeService helps perform file lookups in a backup locationClient location -type PITRTimerangeService struct { - l *logrus.Entry - locationClient pitrLocationClient +// PBMPITRService helps to perform PITR chunks lookup in a backup storage +type PBMPITRService struct { + l *logrus.Entry } -// NewPITRTimerangeService creates new backup locationClient service. -func NewPITRTimerangeService(pitrLocationClient pitrLocationClient) *PITRTimerangeService { - return &PITRTimerangeService{ - l: logrus.WithField("component", "services/backup/pitr_storage"), - locationClient: pitrLocationClient, +// NewPBMPITRService creates new backup PBMPITRService service. +func NewPBMPITRService() *PBMPITRService { + return &PBMPITRService{ + l: logrus.WithField("component", "services/backup/pitr_storage"), } } @@ -111,16 +109,27 @@ func file(ext string) compressionType { } } -func (ss *PITRTimerangeService) getPITROplogs(ctx context.Context, location *models.BackupLocation, artifactName string) ([]*oplogChunk, error) { +func (s *PBMPITRService) getPITROplogs(ctx context.Context, storage Storage, location *models.BackupLocation, artifact *models.Artifact) ([]*oplogChunk, error) { + var oplogChunks []*oplogChunk + + if storage == nil { + return oplogChunks, nil + } if location.S3Config == nil { return nil, errUnsupportedLocation } - var err error - var oplogChunks []*oplogChunk + var prefix string + + // Only artifacts taken with new agents can be restored from artifact folder. 
+ if len(artifact.MetadataList) == 0 { + prefix = path.Join(artifact.Name, pitrFSPrefix) + } else { + prefix = path.Join(artifact.Folder, pitrFSPrefix) + } - prefix := path.Join(artifactName, pitrFSPrefix) - pitrFiles, err := ss.locationClient.List(ctx, location.S3Config.Endpoint, location.S3Config.AccessKey, location.S3Config.SecretKey, location.S3Config.BucketName, prefix, "") //nolint:lll + s3Config := location.S3Config + pitrFiles, err := storage.List(ctx, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, prefix, "") if err != nil { return nil, errors.Wrap(err, "get list of pitr chunks") } @@ -130,7 +139,7 @@ func (ss *PITRTimerangeService) getPITROplogs(ctx context.Context, location *mod for _, f := range pitrFiles { if f.IsDeleteMarker { - ss.l.Debugf("skip pitr chunk %s/%s because of file has delete marker", prefix, f.Name) + s.l.Debugf("skip pitr chunk %s/%s because of file has delete marker", prefix, f.Name) continue } chunk := pitrMetaFromFileName(prefix, f.Name) @@ -143,10 +152,11 @@ func (ss *PITRTimerangeService) getPITROplogs(ctx context.Context, location *mod return oplogChunks, nil } -func (ss *PITRTimerangeService) ListPITRTimeranges(ctx context.Context, artifactName string, location *models.BackupLocation) ([]Timeline, error) { +// ListPITRTimeranges returns the available PITR timeranges for the given artifact in the provided location. 
+func (s *PBMPITRService) ListPITRTimeranges(ctx context.Context, storage Storage, location *models.BackupLocation, artifact *models.Artifact) ([]Timeline, error) { var timelines [][]Timeline - oplogs, err := ss.getPITROplogs(ctx, location, artifactName) + oplogs, err := s.getPITROplogs(ctx, storage, location, artifact) if err != nil { return nil, errors.Wrap(err, "get slice") } @@ -154,20 +164,30 @@ func (ss *PITRTimerangeService) ListPITRTimeranges(ctx context.Context, artifact return nil, nil } - t, err := gettimelines(oplogs), nil + t, err := getTimelines(oplogs), nil if err != nil { - return nil, errors.Wrapf(err, "get PITR timeranges for backup '%s'", artifactName) + return nil, errors.Wrapf(err, "get PITR timeranges for backup '%s'", artifact.Name) } if len(t) != 0 { timelines = append(timelines, t) } - return mergeTimelines(timelines...), nil + mergedTimelines := mergeTimelines(timelines...) + trimTimelines(mergedTimelines) + + return mergedTimelines, nil +} + +// trimTimelines adds one second to the Start value of every timeline record. Required to fit PBM values. +func trimTimelines(timelines []Timeline) { + for i := range timelines { + timelines[i].Start += 1 + } } // pitrMetaFromFileName parses given file name and returns PITRChunk metadata // it returns nil if the file wasn't parse successfully (e.g. wrong format) -// current fromat is 20200715155939-0.20200715160029-1.oplog.snappy +// current format is 20200715155939-0.20200715160029-1.oplog.snappy // (https://github.com/percona/percona-backup-mongodb/wiki/PITR:-storage-layout) // // !!! 
should be agreed with pbm/pitr.chunkPath() @@ -226,7 +246,7 @@ func pitrParseTS(tstr string) *primitive.Timestamp { return &ts } -func gettimelines(slices []*oplogChunk) []Timeline { +func getTimelines(slices []*oplogChunk) []Timeline { var tl Timeline var timelines []Timeline var prevEnd primitive.Timestamp @@ -355,3 +375,31 @@ func mergeTimelines(timelines ...[]Timeline) []Timeline { return ret } + +// GetPITRFiles returns list of PITR chunks. If 'until' specified, returns only chunks created before that date, otherwise returns all artifact chunks. +func (s *PBMPITRService) GetPITRFiles( + ctx context.Context, + storage Storage, + location *models.BackupLocation, + artifact *models.Artifact, + until *time.Time, +) ([]*oplogChunk, error) { + opLogs, err := s.getPITROplogs(ctx, storage, location, artifact) + if err != nil { + return nil, err + } + + if until != nil { + var res []*oplogChunk + for _, chunk := range opLogs { + chunkStartTime := time.Unix(int64(chunk.StartTS.T), 0) + // We're checking only start time because when pbm takes snapshot, chunk is being finalizing automatically. 
+ if chunkStartTime.Before(*until) { + res = append(res, chunk) + } + } + return res, nil + } + + return opLogs, nil +} diff --git a/managed/services/backup/pitr_timerange_service_test.go b/managed/services/backup/pitr_timerange_service_test.go index 28ab7e1666..192b93d3e3 100644 --- a/managed/services/backup/pitr_timerange_service_test.go +++ b/managed/services/backup/pitr_timerange_service_test.go @@ -21,10 +21,12 @@ import ( "path" "strings" "testing" + "time" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "go.mongodb.org/mongo-driver/bson/primitive" "github.com/percona/pmm/managed/models" @@ -80,38 +82,7 @@ func TestPitrMetaFromFileName(t *testing.T) { } } -func TestPitrParseTs(t *testing.T) { - tests := []struct { - name string - filename string - expected *primitive.Timestamp - }{ - { - name: "with time and index", - filename: "20220829115611-10", - expected: &primitive.Timestamp{T: uint32(1661774171), I: 10}, - }, - { - name: "time without index", - filename: "20220829120544", - expected: &primitive.Timestamp{T: uint32(1661774744), I: 0}, - }, - { - name: "with invalid timestamp", - filename: "2022", - expected: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ts := pitrParseTS(tt.filename) - assert.Equal(t, tt.expected, ts) - }) - } -} - -func TestListPITRTimelines(t *testing.T) { +func TestGetPITROplogs(t *testing.T) { ctx := context.Background() location := &models.BackupLocation{ S3Config: &models.S3LocationConfig{ @@ -123,8 +94,9 @@ func TestListPITRTimelines(t *testing.T) { }, } + mockedStorage := &MockStorage{} + t.Run("successful", func(t *testing.T) { - mockedStorage := &mockPitrLocationClient{} listedFiles := []minio.FileInfo{ { Name: "rs0/20220829/20220829115611-1.20220829120544-10.oplog.s2", @@ -132,31 +104,28 @@ func TestListPITRTimelines(t *testing.T) { }, } - statFile := minio.FileInfo{ - Name: pitrFSPrefix + 
"rs0/20220829/20220829115611-1.20220829120544-10.oplog.s2", - Size: 1024, - } - mockedStorage.On("List", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(listedFiles, nil) - mockedStorage.On("FileStat", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(statFile, nil) + //statFile := minio.FileInfo{ + // Name: pitrFSPrefix + "rs0/20220829/20220829115611-1.20220829120544-10.oplog.s2", + // Size: 1024, + //} + mockedStorage.On("List", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(listedFiles, nil).Once() - ss := NewPITRTimerangeService(mockedStorage) - timelines, err := ss.getPITROplogs(ctx, location, "") + service := NewPBMPITRService() + timelines, err := service.getPITROplogs(ctx, mockedStorage, location, &models.Artifact{}) assert.NoError(t, err) assert.Len(t, timelines, 1) }) t.Run("fails on file list error", func(t *testing.T) { - mockedStorage := &mockPitrLocationClient{} - mockedStorage.On("List", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("listing object error")) + mockedStorage.On("List", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("listing object error")).Once() - ss := NewPITRTimerangeService(mockedStorage) - timelines, err := ss.getPITROplogs(ctx, location, "") + service := NewPBMPITRService() + timelines, err := service.getPITROplogs(ctx, mockedStorage, location, &models.Artifact{}) assert.Error(t, err) assert.Nil(t, timelines) }) t.Run("skips artifacts with deletion markers", func(t *testing.T) { - mockedStorage := &mockPitrLocationClient{} listedFiles := []minio.FileInfo{ { Name: "rs0/20220829/20220829115611-1.20220829120544-10.oplog.s2", @@ -165,13 +134,46 @@ func TestListPITRTimelines(t *testing.T) { }, } - mockedStorage.On("List", ctx, mock.Anything, 
mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(listedFiles, nil) + mockedStorage.On("List", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(listedFiles, nil).Once() - ss := NewPITRTimerangeService(mockedStorage) - timelines, err := ss.getPITROplogs(ctx, location, "") + service := NewPBMPITRService() + timelines, err := service.getPITROplogs(ctx, mockedStorage, location, &models.Artifact{}) assert.NoError(t, err) assert.Len(t, timelines, 0) }) + + mockedStorage.AssertExpectations(t) +} + +func TestPitrParseTs(t *testing.T) { + tests := []struct { + name string + filename string + expected *primitive.Timestamp + }{ + { + name: "with time and index", + filename: "20220829115611-10", + expected: &primitive.Timestamp{T: uint32(1661774171), I: 10}, + }, + { + name: "time without index", + filename: "20220829120544", + expected: &primitive.Timestamp{T: uint32(1661774744), I: 0}, + }, + { + name: "with invalid timestamp", + filename: "2022", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := pitrParseTS(tt.filename) + assert.Equal(t, tt.expected, ts) + }) + } } func TestPITRMergeTimelines(t *testing.T) { @@ -493,3 +495,57 @@ func printTTL(tlns ...Timeline) string { return strings.Join(ret, ", ") } + +func TestGetPITRFiles(t *testing.T) { + ctx := context.Background() + S3Config := models.S3LocationConfig{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + AccessKey: "access_key", + SecretKey: "secret_key", + BucketName: "example_bucket", + BucketRegion: "us-east-1", + } + location := &models.BackupLocation{ + S3Config: &S3Config, + } + + mockedStorage := &MockStorage{} + service := NewPBMPITRService() + + listedFiles := []minio.FileInfo{ + {Name: "rs0/20230411/20230411112014-2.20230411112507-12.oplog.s2"}, + {Name: "rs0/20230411/20230411112507-12.20230411112514-3.oplog.s2"}, + {Name: 
"rs0/20230411/20230411112514-3.20230411113007-8.oplog.s2"}, + {Name: "rs0/20230411/20230411113007-8.20230411113014-2.oplog.s2"}, + {Name: "rs0/20230411/20230411113014-2.20230411113507-8.oplog.s2"}, + } + + t.Run("'until' not specified", func(t *testing.T) { + mockedStorage.On("List", ctx, S3Config.Endpoint, S3Config.AccessKey, S3Config.SecretKey, S3Config.BucketName, pitrFSPrefix, "").Return(listedFiles, nil).Twice() + + PITRChunks, err := service.GetPITRFiles(ctx, mockedStorage, location, &models.Artifact{}, nil) + require.NoError(t, err) + + expectedRes, err := service.getPITROplogs(ctx, mockedStorage, location, &models.Artifact{}) + require.NoError(t, err) + + assert.Equal(t, expectedRes, PITRChunks) + }) + + t.Run("'until' specified", func(t *testing.T) { + mockedStorage.On("List", ctx, S3Config.Endpoint, S3Config.AccessKey, S3Config.SecretKey, S3Config.BucketName, pitrFSPrefix, "").Return(listedFiles, nil).Twice() + + until, err := time.Parse("2006-01-02T15:04:05", "2023-04-11T11:25:14") + require.NoError(t, err) + + PITRChunks, err := service.GetPITRFiles(ctx, mockedStorage, location, &models.Artifact{}, &until) + require.NoError(t, err) + + expectedRes, err := service.getPITROplogs(ctx, mockedStorage, location, &models.Artifact{}) + require.NoError(t, err) + + assert.Equal(t, expectedRes[:2], PITRChunks) + }) + + mockedStorage.AssertExpectations(t) +} diff --git a/managed/services/backup/removal_service.go b/managed/services/backup/removal_service.go index 92aa2f204f..ae0733acdf 100644 --- a/managed/services/backup/removal_service.go +++ b/managed/services/backup/removal_service.go @@ -17,6 +17,9 @@ package backup import ( "context" + "database/sql" + "path" + "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -27,89 +30,183 @@ import ( "github.com/percona/pmm/managed/models" ) -// RemovalService manage removing of backup artifacts. +// RemovalService manages removing of backup artifacts. 
type RemovalService struct { - l *logrus.Entry - db *reform.DB - s3 s3 + l *logrus.Entry + db *reform.DB + pbmPITRService pbmPITRService } // NewRemovalService creates new backup removal service. -func NewRemovalService(db *reform.DB, s3 s3) *RemovalService { +func NewRemovalService(db *reform.DB, pbmPITRService pbmPITRService) *RemovalService { return &RemovalService{ - l: logrus.WithField("component", "services/backup/removal"), - db: db, - s3: s3, + l: logrus.WithField("component", "services/backup/removal"), + db: db, + pbmPITRService: pbmPITRService, } } -// DeleteArtifact deletes specified artifact. -func (s *RemovalService) DeleteArtifact(ctx context.Context, artifactID string, removeFiles bool) error { - artifactName, s3Config, err := s.beginDeletingArtifact(artifactID) +// DeleteArtifact deletes specified artifact along with files if specified. +func (s *RemovalService) DeleteArtifact(storage Storage, artifactID string, removeFiles bool) error { + artifact, prevStatus, err := s.lockArtifact(artifactID, models.DeletingBackupStatus) if err != nil { return err } - if s3Config != nil && removeFiles { - if err := s.s3.RemoveRecursive( - ctx, - s3Config.Endpoint, - s3Config.AccessKey, - s3Config.SecretKey, - s3Config.BucketName, - // Recursive listing finds all the objects with the specified prefix. - // There could be a problem e.g. when we have artifacts `backup-daily` and `backup-daily-1`, so - // listing by prefix `backup-daily` gives us both artifacts. - // To avoid such a situation we need to append a slash. - artifactName+"/"); err != nil { - if _, updateErr := models.UpdateArtifact(s.db.Querier, artifactID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.FailedToDeleteBackupStatus), - }); updateErr != nil { - s.l.WithError(updateErr). 
- Errorf("failed to set status %q for artifact %q", models.FailedToDeleteBackupStatus, artifactID) - } + // For cases when it's not clear can files be removed or not - cannot tell new from legacy artifacts. + if prevStatus != models.SuccessBackupStatus && len(artifact.MetadataList) == 0 { + removeFiles = false + } + defer func() { + if err != nil { + _ = s.setArtifactStatus(artifactID, models.FailedToDeleteBackupStatus) + } + }() + + restoreItems, err := models.FindRestoreHistoryItems(s.db.Querier, models.RestoreHistoryItemFilters{ + ArtifactID: artifactID, + }) + if err != nil { + return err + } + + for _, ri := range restoreItems { + if err = models.RemoveRestoreHistoryItem(s.db.Querier, ri.ID); err != nil { return err } } - return s.db.InTransaction(func(tx *reform.TX) error { - restoreItems, err := models.FindRestoreHistoryItems(tx.Querier, models.RestoreHistoryItemFilters{ - ArtifactID: artifactID, - }) + removeFilesHelper := func() { + var err error + defer func() { + if err != nil { + _ = s.setArtifactStatus(artifactID, models.FailedToDeleteBackupStatus) + } + }() + + location, err := models.FindBackupLocationByID(s.db.Querier, artifact.LocationID) if err != nil { - return err + s.l.WithError(err).Error("couldn't get location") + return + } + + service, err := models.FindServiceByID(s.db.Querier, artifact.ServiceID) + if err != nil { + s.l.WithError(err).Error("couldn't get service") + return + } + + if err = s.deleteArtifactFiles(context.Background(), storage, location, artifact, len(artifact.MetadataList)); err != nil { + s.l.WithError(err).Error("couldn't delete artifact files") + return } - for _, ri := range restoreItems { - if err := models.RemoveRestoreHistoryItem(tx.Querier, ri.ID); err != nil { - return err + if service.ServiceType == models.MongoDBServiceType && artifact.Mode == models.PITR { + if err = s.deleteArtifactPITRChunks(context.Background(), storage, location, artifact, nil); err != nil { + s.l.WithError(err).Error("couldn't delete 
artifact PITR chunks") + return } } - return models.DeleteArtifact(tx.Querier, artifactID) - }) + err = models.DeleteArtifact(s.db.Querier, artifactID) + if err != nil { + s.l.WithError(err).Error("couldn't delete artifact") + } + } + + if removeFiles { + go removeFilesHelper() + return nil + } + + err = models.DeleteArtifact(s.db.Querier, artifactID) + if err != nil { + return err + } + + return nil +} + +// TrimPITRArtifact removes first N first records from PITR artifact. Removes snapshots, PITR chunks and corresponding data from database. +func (s *RemovalService) TrimPITRArtifact(storage Storage, artifactID string, firstN int) error { + artifact, oldStatus, err := s.lockArtifact(artifactID, models.CleanupInProgressStatus) + if err != nil { + return err + } + + go func() { + var err error + defer func() { + if err != nil { + s.l.Error("Couldn't trim artifact files. Restoring is not guaranteed for files outside of retention policy limit.") + // We need to release PITR artifact in case of error, otherwise it will be blocked for restoring. 
+ if err = s.releaseArtifact(artifactID, oldStatus); err != nil { + s.l.WithError(err).Errorf("couldn't unlock artifact %q", artifactID) + return + } + } + }() + + location, err := models.FindBackupLocationByID(s.db.Querier, artifact.LocationID) + if err != nil { + return + } + + if err = s.deleteArtifactFiles(context.Background(), storage, location, artifact, firstN); err != nil { + s.l.WithError(err).Error("couldn't delete artifact files") + return + } + + if err = artifact.MetadataRemoveFirstN(s.db.Querier, uint32(firstN)); err != nil { + s.l.WithError(err).Error("couldn't delete artifact metadata") + return + } + + if err = s.deleteArtifactPITRChunks(context.Background(), storage, location, artifact, artifact.MetadataList[0].RestoreTo); err != nil { + s.l.WithError(err).Error("couldn't delete artifact PITR chunks") + return + } + + if err = s.releaseArtifact(artifactID, oldStatus); err != nil { + s.l.WithError(err).Errorf("couldn't unlock artifact %q", artifactID) + return + } + }() + + return nil } -// beginDeletingArtifact checks if the artifact isn't in use at the moment and sets deleting status, +// lockArtifact checks if the artifact isn't in use at the moment and sets deleting status, // so it will not be used to restore backup. 
-func (s *RemovalService) beginDeletingArtifact( - artifactID string, -) (string, *models.S3LocationConfig, error) { - var s3Config *models.S3LocationConfig - var artifactName string - if err := s.db.InTransaction(func(tx *reform.TX) error { - artifact, err := s.canDeleteArtifact(tx.Querier, artifactID) +func (s *RemovalService) lockArtifact(artifactID string, lockingStatus models.BackupStatus) (*models.Artifact, models.BackupStatus, error) { + var currentStatus models.BackupStatus + + if models.IsArtifactFinalStatus(lockingStatus) { + return nil, "", errors.Wrapf(ErrIncorrectArtifactStatus, "couldn't lock artifact, requested new status %s (present in list of final statuses) for artifact %s", + lockingStatus, artifactID) + } + + var ( + artifact *models.Artifact + err error + ) + + if errTx := s.db.InTransactionContext(s.db.Querier.Context(), &sql.TxOptions{Isolation: sql.LevelSerializable}, func(tx *reform.TX) error { + artifact, err = models.FindArtifactByID(tx.Querier, artifactID) if err != nil { return err } - artifactName = artifact.Name + currentStatus = artifact.Status + + if !models.IsArtifactFinalStatus(artifact.Status) { + return errors.Wrapf(ErrIncorrectArtifactStatus, "artifact with ID %q isn't in a final status", artifact.ID) + } - inProgressStatus := models.InProgressRestoreStatus restoreItems, err := models.FindRestoreHistoryItems(tx.Querier, models.RestoreHistoryItemFilters{ - ArtifactID: artifactID, - Status: &inProgressStatus, + ArtifactID: artifact.ID, + Status: models.InProgressRestoreStatus.Pointer(), }) if err != nil { return err @@ -117,52 +214,126 @@ func (s *RemovalService) beginDeletingArtifact( if len(restoreItems) != 0 { return status.Errorf(codes.FailedPrecondition, "Cannot delete artifact with ID %q: "+ - "artifact is used by currently running restore operation.", artifactID) + "artifact is used by currently running restore operation.", artifact.ID) } - location, err := models.FindBackupLocationByID(tx.Querier, artifact.LocationID) - 
if err != nil { + if _, err := models.UpdateArtifact(tx.Querier, artifact.ID, models.UpdateArtifactParams{ + Status: lockingStatus.Pointer(), + }); err != nil { return err } - s3Config = location.S3Config + return nil + }); errTx != nil { + return nil, "", errTx + } - if _, err := models.UpdateArtifact(tx.Querier, artifactID, models.UpdateArtifactParams{ - Status: models.BackupStatusPointer(models.DeletingBackupStatus), - }); err != nil { - return err + return artifact, currentStatus, nil +} + +// releaseArtifact releases artifact lock by setting one of the final artifact statuses. +func (s *RemovalService) releaseArtifact(artifactID string, setStatus models.BackupStatus) error { + if !models.IsArtifactFinalStatus(setStatus) { + return errors.Wrapf(ErrIncorrectArtifactStatus, "couldn't release artifact, requested new status %s (not present in list of final statuses) for artifact %s", + setStatus, artifactID) + } + + if err := s.setArtifactStatus(artifactID, setStatus); err != nil { + return err + } + return nil +} + +// setArtifactStatus sets provided artifact status. Write error logs if status cannot be set. +func (s *RemovalService) setArtifactStatus(artifactID string, status models.BackupStatus) error { + if _, err := models.UpdateArtifact(s.db.Querier, artifactID, models.UpdateArtifactParams{ + Status: status.Pointer(), + }); err != nil { + s.l.WithError(err).Errorf("failed to set status %q for artifact %q", status, artifactID) + return err + } + return nil +} + +// deleteArtifactFiles deletes artifact files. +// If artifact represents a single snapshot, there is only one record representing artifact files. +// If artifact represents continuous backup (PITR), artifact may contain several records, +// and it's possible to delete only first N of them to implement retention policy. 
+func (s *RemovalService) deleteArtifactFiles(ctx context.Context, storage Storage, location *models.BackupLocation, artifact *models.Artifact, firstN int) error { + s3Config := location.S3Config + if storage == nil || s3Config == nil { + s.l.Debug("Storage not specified.") + return nil + } + + // Old artifact records don't contain representation file list. + if len(artifact.MetadataList) == 0 { + folderName := artifact.Name + "/" + + s.l.Debugf("Deleting folder %s.", folderName) + if err := storage.RemoveRecursive(ctx, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, folderName); err != nil { + return errors.Wrapf(err, "failed to remove folder %s of artifact %s", folderName, artifact.ID) } return nil - }); err != nil { - return "", nil, err } - return artifactName, s3Config, nil + for _, metadata := range artifact.MetadataList[:firstN] { + for _, file := range metadata.FileList { + if file.IsDirectory { + // Recursive listing finds all the objects with the specified prefix. + // There could be a problem e.g. when we have artifacts `backup-daily` and `backup-daily-1`, so + // listing by prefix `backup-daily` gives us both artifacts. + // To avoid such a situation we need to append a slash. 
+ folderName := path.Join(artifact.Folder, file.Name) + "/" + s.l.Debugf("Deleting folder %s.", folderName) + if err := storage.RemoveRecursive(ctx, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, folderName); err != nil { + return errors.Wrapf(err, "failed to remove folder %s of artifact %s", folderName, artifact.ID) + } + } else { + fileName := path.Join(artifact.Folder, file.Name) + s.l.Debugf("Deleting file %s.", fileName) + if err := storage.Remove(ctx, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, fileName); err != nil { + return errors.Wrapf(err, "failed to remove file %s of artifact %s", file.Name, artifact.ID) + } + } + } + } + + return nil } -func (s *RemovalService) canDeleteArtifact(q *reform.Querier, artifactID string) (*models.Artifact, error) { - artifact, err := models.FindArtifactByID(q, artifactID) - switch { - case err == nil: - case errors.Is(err, models.ErrNotFound): - return nil, status.Errorf(codes.NotFound, "Artifact with ID %q not found.", artifactID) - default: - return nil, err - } - - switch artifact.Status { - case models.SuccessBackupStatus, - models.ErrorBackupStatus, - models.FailedToDeleteBackupStatus: - case models.DeletingBackupStatus, - models.InProgressBackupStatus, - models.PausedBackupStatus, - models.PendingBackupStatus: - return nil, status.Errorf(codes.FailedPrecondition, "Artifact with ID %q isn't in the final state.", artifactID) - default: - return nil, status.Errorf(codes.Internal, "Unhandled status %q", artifact.Status) - } - - return artifact, nil +// deleteArtifactPITRChunks deletes artifact PITR chunks. If "until" provided, deletes only chunks created before that time. Deletes all artifact chunks otherwise. 
+func (s *RemovalService) deleteArtifactPITRChunks( + ctx context.Context, + storage Storage, + location *models.BackupLocation, + artifact *models.Artifact, + until *time.Time, +) error { + s3Config := location.S3Config + if storage == nil || s3Config == nil { + s.l.Debug("Storage not specified.") + return nil + } + + chunks, err := s.pbmPITRService.GetPITRFiles(ctx, storage, location, artifact, until) + if err != nil { + return errors.Wrap(err, "failed to get pitr chunks") + } + + if len(chunks) == 0 { + s.l.Debug("No chunks to delete.") + return nil + } + + for _, chunk := range chunks { + s.l.Debugf("Deleting %s.", chunk.FName) + + if err := storage.Remove(ctx, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, chunk.FName); err != nil { + return errors.Wrapf(err, "failed to remove pitr chunk '%s' (%v) from storage", chunk.FName, chunk) + } + } + + return nil } diff --git a/managed/services/backup/removal_service_test.go b/managed/services/backup/removal_service_test.go index cb3942d58e..6b2356abc6 100644 --- a/managed/services/backup/removal_service_test.go +++ b/managed/services/backup/removal_service_test.go @@ -17,7 +17,9 @@ package backup import ( "context" + "database/sql" "testing" + "time" "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -31,72 +33,495 @@ import ( ) func TestDeleteArtifact(t *testing.T) { - ctx := context.Background() sqlDB := testdb.Open(t, models.SkipFixtures, nil) - db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) - mockedS3 := &mockS3{} - removalService := NewRemovalService(db, mockedS3) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + mockedPbmPITRService := &mockPbmPITRService{} + removalService := NewRemovalService(db, mockedPbmPITRService) agent := setup(t, db.Querier, models.MySQLServiceType, "test-service") endpoint := "https://s3.us-west-2.amazonaws.com/" 
accessKey, secretKey, bucketName, bucketRegion := "access_key", "secret_key", "example_bucket", "us-east-2" + s3Config := &models.S3LocationConfig{ + Endpoint: endpoint, + AccessKey: accessKey, + SecretKey: secretKey, + BucketName: bucketName, + BucketRegion: bucketRegion, + } + locationRes, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ Name: "Test location", Description: "Test description", BackupLocationConfig: models.BackupLocationConfig{ - S3Config: &models.S3LocationConfig{ - Endpoint: endpoint, - AccessKey: accessKey, - SecretKey: secretKey, - BucketName: bucketName, - BucketRegion: bucketRegion, + S3Config: s3Config, + }, + }) + require.NoError(t, err) + + createArtifact := func(status models.BackupStatus) *models.Artifact { + artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: "artifact_name", + Vendor: "MySQL", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: status, + }) + require.NoError(t, err) + return artifact + } + + mockedStorage := &MockStorage{} + + t.Run("artifact not in final status", func(t *testing.T) { + artifact := createArtifact(models.PendingBackupStatus) + t.Cleanup(func() { + err := models.DeleteArtifact(db.Querier, artifact.ID) + require.NoError(t, err) + }) + + err := removalService.DeleteArtifact(mockedStorage, artifact.ID, false) + require.ErrorIs(t, err, ErrIncorrectArtifactStatus) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.PendingBackupStatus, artifact.Status) + }) + + t.Run("failed to remove restore history sets artifact error status", func(t *testing.T) { + artifact := createArtifact(models.SuccessBackupStatus) + t.Cleanup(func() { + err := models.DeleteArtifact(db.Querier, artifact.ID) + require.NoError(t, err) + }) + + ri, err := 
models.CreateRestoreHistoryItem(db.Querier, models.CreateRestoreHistoryItemParams{ + ArtifactID: artifact.ID, + ServiceID: *agent.ServiceID, + Status: models.SuccessRestoreStatus, + }) + require.NoError(t, err) + + go func() { + tx, err := db.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelSerializable}) + require.NoError(t, err) + + err = models.RemoveRestoreHistoryItem(tx.Querier, ri.ID) + require.NoError(t, err) + + time.Sleep(time.Second * 3) + + err = tx.Commit() + assert.NoError(t, err) + }() + + time.Sleep(time.Second) + + err = removalService.DeleteArtifact(mockedStorage, artifact.ID, false) + require.Error(t, err) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.FailedToDeleteBackupStatus, artifact.Status) + }) + + t.Run("error during removing files", func(t *testing.T) { + artifact := createArtifact(models.SuccessBackupStatus) + t.Cleanup(func() { + err := models.DeleteArtifact(db.Querier, artifact.ID) + require.NoError(t, err) + }) + + someError := errors.New("some error") + mockedStorage.On("RemoveRecursive", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, artifact.Name+"/"). + Return(someError).Once() + + err := removalService.DeleteArtifact(mockedStorage, artifact.ID, true) + // No error because removing files running in goroutine. + require.NoError(t, err) + + // Removing files running in goroutine, need to wait some time. 
+ time.Sleep(time.Second * 1) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.FailedToDeleteBackupStatus, artifact.Status) + }) + + t.Run("successful delete snapshot", func(t *testing.T) { + artifact := createArtifact(models.SuccessBackupStatus) + + mockedStorage.On("RemoveRecursive", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, artifact.Name+"/"). + Return(nil).Once() + + err := removalService.DeleteArtifact(mockedStorage, artifact.ID, true) + assert.NoError(t, err) + + // Removing files running in goroutine, need to wait some time. + time.Sleep(time.Second * 3) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + assert.Nil(t, artifact) + assert.ErrorIs(t, err, models.ErrNotFound) + }) + + t.Run("successful delete pitr", func(t *testing.T) { + agent := setup(t, db.Querier, models.MongoDBServiceType, "test-service2") + + artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: "artifact_name", + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Folder: "artifact_folder", + }) + require.NoError(t, err) + + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{ + FileList: []models.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: "file2"}, {Name: "file3"}}, }, + }) + require.NoError(t, err) + + chunksRet := []*oplogChunk{ + {FName: "chunk1"}, + {FName: "chunk2"}, + {FName: "chunk3"}, + } + + mockedStorage.On("RemoveRecursive", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/dir1/"). 
+ Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/file1"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/file2"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/file3"). + Return(nil).Once() + + mockedPbmPITRService.On("GetPITRFiles", mock.Anything, mock.Anything, locationRes, artifact, mock.Anything).Return(chunksRet, nil).Once() + + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "chunk1"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "chunk2"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "chunk3"). + Return(nil).Once() + + err = removalService.DeleteArtifact(mockedStorage, artifact.ID, true) + assert.NoError(t, err) + + // Removing files running in goroutine, need to wait some time. 
+ time.Sleep(time.Second * 3) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + assert.Nil(t, artifact) + assert.ErrorIs(t, err, models.ErrNotFound) + }) + + mockedPbmPITRService.AssertExpectations(t) + mockedStorage.AssertExpectations(t) +} + +func TestTrimPITRArtifact(t *testing.T) { + sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + mockedPbmPITRService := &mockPbmPITRService{} + removalService := NewRemovalService(db, mockedPbmPITRService) + mockedStorage := &MockStorage{} + + agent := setup(t, db.Querier, models.MongoDBServiceType, "test-service2") + + endpoint := "https://s3.us-west-2.amazonaws.com/" + accessKey, secretKey, bucketName, bucketRegion := "access_key", "secret_key", "example_bucket", "us-east-2" + + s3Config := &models.S3LocationConfig{ + Endpoint: endpoint, + AccessKey: accessKey, + SecretKey: secretKey, + BucketName: bucketName, + BucketRegion: bucketRegion, + } + + locationRes, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ + Name: "Test location", + Description: "Test description", + BackupLocationConfig: models.BackupLocationConfig{ + S3Config: s3Config, }, }) require.NoError(t, err) artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ Name: "artifact_name", - Vendor: "MySQL", + Vendor: "MongoDB", LocationID: locationRes.ID, ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.PendingBackupStatus, + Folder: "artifact_folder", + }) + require.NoError(t, err) + + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{ + FileList: []models.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: 
"file2"}, {Name: "file3"}}, + }, }) require.NoError(t, err) - t.Run("failed to delete from s3", func(t *testing.T) { - mockedS3.On("RemoveRecursive", mock.Anything, endpoint, accessKey, secretKey, bucketName, artifact.Name+"/"). - Return(errors.Errorf("failed to remove")). - Run(func(args mock.Arguments) { - artifact, err := models.FindArtifactByID(db.Querier, artifact.ID) - require.NoError(t, err) - require.NotNil(t, artifact) - assert.Equal(t, artifact.Status, models.DeletingBackupStatus) - }).Once() + restoreTo := time.Unix(123, 456) - err := removalService.DeleteArtifact(ctx, artifact.ID, true) - require.EqualError(t, err, "failed to remove") + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{ + FileList: []models.File{{Name: "dir2", IsDirectory: true}, {Name: "file4"}, {Name: "file5"}, {Name: "file6"}}, + RestoreTo: &restoreTo, + }, + }) + require.NoError(t, err) - artifact, err := models.FindArtifactByID(db.Querier, artifact.ID) + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{ + FileList: []models.File{{Name: "dir3", IsDirectory: true}, {Name: "file7"}, {Name: "file8"}, {Name: "file9"}}, + }, + }) + require.NoError(t, err) + + t.Run("artifact not in final status", func(t *testing.T) { + err := removalService.TrimPITRArtifact(mockedStorage, artifact.ID, 1) + require.ErrorIs(t, err, ErrIncorrectArtifactStatus) + + time.Sleep(time.Second * 2) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) require.NoError(t, err) require.NotNil(t, artifact) - assert.Equal(t, artifact.Status, models.FailedToDeleteBackupStatus) + assert.Equal(t, models.PendingBackupStatus, artifact.Status) + assert.Equal(t, 3, len(artifact.MetadataList)) }) - t.Run("successful delete", func(t *testing.T) { - mockedS3.On("RemoveRecursive", mock.Anything, endpoint, accessKey, secretKey, bucketName, - 
artifact.Name+"/").Return(nil).Once() + t.Run("error during removing files sets artifact status", func(t *testing.T) { + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{Status: models.SuccessBackupStatus.Pointer()}) + require.NoError(t, err) - err = removalService.DeleteArtifact(ctx, artifact.ID, true) - assert.NoError(t, err) + mockedStorage.On("RemoveRecursive", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/dir1/"). + Return(errors.New("some error")).Once() + + err := removalService.TrimPITRArtifact(mockedStorage, artifact.ID, 1) + require.NoError(t, err) + + time.Sleep(time.Second * 2) - _, err := models.FindArtifactByID(db.Querier, artifact.ID) - assert.True(t, errors.Is(err, models.ErrNotFound)) + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.SuccessBackupStatus, artifact.Status) + assert.Equal(t, 3, len(artifact.MetadataList)) + }) + + t.Run("successful", func(t *testing.T) { + chunksRet := []*oplogChunk{ + {FName: "chunk1"}, + {FName: "chunk2"}, + {FName: "chunk3"}, + } + + mockedStorage.On("RemoveRecursive", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/dir1/"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/file1"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/file2"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "artifact_folder/file3"). 
+ Return(nil).Once() + + mockedPbmPITRService.On("GetPITRFiles", mock.Anything, mock.Anything, locationRes, mock.Anything, artifact.MetadataList[1].RestoreTo).Return(chunksRet, nil).Once() + + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "chunk1"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "chunk2"). + Return(nil).Once() + mockedStorage.On("Remove", mock.Anything, s3Config.Endpoint, s3Config.AccessKey, s3Config.SecretKey, s3Config.BucketName, "chunk3"). + Return(nil).Once() + + err := removalService.TrimPITRArtifact(mockedStorage, artifact.ID, 1) + require.NoError(t, err) + + time.Sleep(time.Second * 2) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.SuccessBackupStatus, artifact.Status) + assert.Equal(t, 2, len(artifact.MetadataList)) + }) + + mockedStorage.AssertExpectations(t) + mockedPbmPITRService.AssertExpectations(t) +} + +func TestLockArtifact(t *testing.T) { + sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + agent := setup(t, db.Querier, models.MongoDBServiceType, "test-service3") + + locationRes, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ + Name: "Test location", + Description: "Test description", + BackupLocationConfig: models.BackupLocationConfig{ + FilesystemConfig: &models.FilesystemLocationConfig{Path: "/"}, + }, + }) + require.NoError(t, err) + + artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: "artifact_name", + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + 
Status: models.PendingBackupStatus, + Folder: "artifact_folder", + }) + require.NoError(t, err) + + removalService := NewRemovalService(db, nil) + + t.Run("wrong locking status", func(t *testing.T) { + res, oldStatus, err := removalService.lockArtifact(artifact.ID, models.FailedToDeleteBackupStatus) + assert.Nil(t, res) + assert.Empty(t, oldStatus) + assert.ErrorIs(t, err, ErrIncorrectArtifactStatus) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.PendingBackupStatus, artifact.Status) + }) + + t.Run("artifact not in final status", func(t *testing.T) { + res, oldStatus, err := removalService.lockArtifact(artifact.ID, models.DeletingBackupStatus) + assert.Nil(t, res) + assert.Empty(t, oldStatus) + assert.ErrorIs(t, err, ErrIncorrectArtifactStatus) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.PendingBackupStatus, artifact.Status) + }) + + t.Run("restore in progress", func(t *testing.T) { + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{Status: models.SuccessBackupStatus.Pointer()}) + require.NoError(t, err) + + ri, err := models.CreateRestoreHistoryItem(db.Querier, models.CreateRestoreHistoryItemParams{ + ArtifactID: artifact.ID, + ServiceID: *agent.ServiceID, + Status: models.InProgressRestoreStatus, + }) + require.NoError(t, err) + + t.Cleanup(func() { + err := models.RemoveRestoreHistoryItem(db.Querier, ri.ID) + require.NoError(t, err) + }) + + res, oldStatus, err := removalService.lockArtifact(artifact.ID, models.DeletingBackupStatus) + assert.Nil(t, res) + assert.Empty(t, oldStatus) + assert.Contains(t, err.Error(), "artifact is used by currently running restore operation") + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, 
models.SuccessBackupStatus, artifact.Status) + }) + + t.Run("success", func(t *testing.T) { + res, oldStatus, err := removalService.lockArtifact(artifact.ID, models.DeletingBackupStatus) + require.NotNil(t, res) + assert.Equal(t, models.SuccessBackupStatus, oldStatus) + require.NoError(t, err) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.DeletingBackupStatus, artifact.Status) + }) +} + +func TestReleaseArtifact(t *testing.T) { + sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) }) - mock.AssertExpectationsForObjects(t, mockedS3) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + agent := setup(t, db.Querier, models.MongoDBServiceType, "test-service3") + + locationRes, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ + Name: "Test location", + Description: "Test description", + BackupLocationConfig: models.BackupLocationConfig{ + FilesystemConfig: &models.FilesystemLocationConfig{Path: "/"}, + }, + }) + require.NoError(t, err) + + artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: "artifact_name", + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.DeletingBackupStatus, + Folder: "artifact_folder", + }) + require.NoError(t, err) + + removalService := NewRemovalService(db, nil) + + t.Run("wrong releasing status", func(t *testing.T) { + err := removalService.releaseArtifact(artifact.ID, models.PendingBackupStatus) + assert.ErrorIs(t, err, ErrIncorrectArtifactStatus) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.DeletingBackupStatus, artifact.Status) + }) + + t.Run("success", func(t *testing.T) { + err := 
removalService.releaseArtifact(artifact.ID, models.SuccessBackupStatus) + assert.NoError(t, err) + + artifact, err = models.FindArtifactByID(db.Querier, artifact.ID) + require.NoError(t, err) + require.NotNil(t, artifact) + assert.Equal(t, models.SuccessBackupStatus, artifact.Status) + }) } diff --git a/managed/services/backup/retention_service.go b/managed/services/backup/retention_service.go index 4d4882ac22..c46a56e1ec 100644 --- a/managed/services/backup/retention_service.go +++ b/managed/services/backup/retention_service.go @@ -16,8 +16,6 @@ package backup import ( - "context" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "gopkg.in/reform.v1" @@ -43,54 +41,95 @@ func NewRetentionService(db *reform.DB, removalSVC removalService) *RetentionSer // EnforceRetention enforce retention on provided scheduled backup task // it removes any old successful artifacts below retention threshold. -func (s *RetentionService) EnforceRetention(ctx context.Context, scheduleID string) error { - artifacts, retention, err := s.findArtifacts(s.db.Querier, scheduleID) +func (s *RetentionService) EnforceRetention(scheduleID string) error { + task, err := models.FindScheduledTaskByID(s.db.Querier, scheduleID) if err != nil { return err } - if retention == 0 || int(retention) > len(artifacts) { - return nil + retention, err := task.Retention() + if err != nil { + return err } - for _, artifact := range artifacts[retention:] { - if err := s.removalSVC.DeleteArtifact(ctx, artifact.ID, true); err != nil { - return err - } + if retention == 0 { + return nil } - return nil -} + mode, err := task.Mode() + if err != nil { + return err + } -// findArtifacts returns successful artifacts belong to scheduled task and it's retention. 
-func (s *RetentionService) findArtifacts(q *reform.Querier, scheduleID string) ([]*models.Artifact, uint32, error) { - var retention uint32 + locationID, err := task.LocationID() + if err != nil { + return err + } - task, err := models.FindScheduledTaskByID(q, scheduleID) + location, err := models.FindBackupLocationByID(s.db.Querier, locationID) if err != nil { - return nil, retention, err + return err } - switch task.Type { - case models.ScheduledMySQLBackupTask: - retention = task.Data.MySQLBackupTask.Retention - case models.ScheduledMongoDBBackupTask: - retention = task.Data.MongoDBBackupTask.Retention + storage := GetStorageForLocation(location) + + switch mode { + case models.Snapshot: + err = s.retentionSnapshot(storage, scheduleID, retention) + case models.PITR: + err = s.retentionPITR(storage, scheduleID, retention) default: - return nil, retention, errors.Errorf("invalid backup type %s", task.Type) + s.l.Warnf("Retention policy is not implemented for backup mode %s", mode) + return nil } - if retention == 0 { - return nil, retention, nil + return err +} + +func (s *RetentionService) retentionSnapshot(storage Storage, scheduleID string, retention uint32) error { + artifacts, err := models.FindArtifacts(s.db.Querier, models.ArtifactFilters{ + ScheduleID: scheduleID, + Status: models.SuccessBackupStatus, + }) + if err != nil { + return err + } + + if int(retention) >= len(artifacts) { + return nil + } + + for _, artifact := range artifacts[retention:] { + if err := s.removalSVC.DeleteArtifact(storage, artifact.ID, true); err != nil { + return err + } } - artifacts, err := models.FindArtifacts(q, models.ArtifactFilters{ + return nil +} + +func (s *RetentionService) retentionPITR(storage Storage, scheduleID string, retention uint32) error { + artifacts, err := models.FindArtifacts(s.db.Querier, models.ArtifactFilters{ ScheduleID: scheduleID, Status: models.SuccessBackupStatus, }) if err != nil { - return nil, 0, err + return err + } + + if len(artifacts) == 0 
{ + return nil + } + + if len(artifacts) > 1 { + return errors.Errorf("Can be only one artifact entity for PITR in the database but found %d", len(artifacts)) + } + + artifact := artifacts[0] + trimBy := len(artifact.MetadataList) - int(retention) + if trimBy <= 0 { + return nil } - return artifacts, retention, nil + return s.removalSVC.TrimPITRArtifact(storage, artifact.ID, trimBy) } diff --git a/managed/services/backup/retention_service_test.go b/managed/services/backup/retention_service_test.go index fcf3735ea3..499bdf6cf5 100644 --- a/managed/services/backup/retention_service_test.go +++ b/managed/services/backup/retention_service_test.go @@ -16,7 +16,6 @@ package backup import ( - "context" "testing" "github.com/brianvoe/gofakeit/v6" @@ -31,24 +30,20 @@ import ( ) func TestEnsureRetention(t *testing.T) { - ctx := context.Background() sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) - mockedS3 := &mockS3{} - removalService := NewRemovalService(db, mockedS3) - retentionService := NewRetentionService(db, removalService) - mockedS3.On("RemoveRecursive", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, - mock.Anything).Return(nil) + mockedRemovalService := &mockRemovalService{} + retentionService := NewRetentionService(db, mockedRemovalService) agent := setup(t, db.Querier, models.MySQLServiceType, "test-service") endpoint := "https://s3.us-west-2.amazonaws.com/" accessKey, secretKey, bucketName, bucketRegion := "access_key", "secret_key", "example_bucket", "us-east-2" - t.Cleanup(func() { - require.NoError(t, sqlDB.Close()) - }) - locationRes, err := models.CreateBackupLocation(db.Querier, models.CreateBackupLocationParams{ Name: "Test location", Description: "Test description", @@ -64,77 +59,187 @@ func TestEnsureRetention(t *testing.T) { }) require.NoError(t, err) - task, err := 
models.CreateScheduledTask(db.Querier, models.CreateScheduledTaskParams{ - CronExpression: "* * * * *", - Type: models.ScheduledMongoDBBackupTask, - Data: &models.ScheduledTaskData{ - MongoDBBackupTask: &models.MongoBackupTaskData{ - CommonBackupTaskData: models.CommonBackupTaskData{ - ServiceID: *agent.ServiceID, - LocationID: locationRes.ID, - Name: "test", - Retention: 0, - Mode: models.Snapshot, + t.Run("wrong task mode", func(t *testing.T) { + wrongModetask, err := models.CreateScheduledTask(db.Querier, models.CreateScheduledTaskParams{ + CronExpression: "* * * * *", + Type: models.ScheduledMongoDBBackupTask, + Data: &models.ScheduledTaskData{ + MongoDBBackupTask: &models.MongoBackupTaskData{ + CommonBackupTaskData: models.CommonBackupTaskData{ + Name: "test", + Mode: "wrong backup mode", + }, }, }, - }, - Disabled: false, + }) + require.NoError(t, err) + + // Returns nil, no dependency calls. + err = retentionService.EnforceRetention(wrongModetask.ID) + assert.NoError(t, err) }) - require.NoError(t, err) - createArtifact := func() { - _, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: gofakeit.Name(), - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, - ScheduleID: task.ID, + t.Run("successful snapshot", func(t *testing.T) { + task, err := models.CreateScheduledTask(db.Querier, models.CreateScheduledTaskParams{ + CronExpression: "* * * * *", + Type: models.ScheduledMongoDBBackupTask, + Data: &models.ScheduledTaskData{ + MongoDBBackupTask: &models.MongoBackupTaskData{ + CommonBackupTaskData: models.CommonBackupTaskData{ + ServiceID: *agent.ServiceID, + LocationID: locationRes.ID, + Name: "test2", + Retention: 0, + Mode: models.Snapshot, + }, + }, + }, + Disabled: false, }) require.NoError(t, err) - } - require.NoError(t, err) - countArtifacts := func() int { - artifacts, err := 
models.FindArtifacts(db.Querier, models.ArtifactFilters{ - ScheduleID: task.ID, - Status: models.SuccessBackupStatus, - }) + createArtifact := func() { + _, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: gofakeit.Name(), + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + ScheduleID: task.ID, + }) + require.NoError(t, err) + } require.NoError(t, err) - return len(artifacts) - } - changeRetention := func(retention uint32) { - task.Data.MongoDBBackupTask.Retention = retention - task, err = models.ChangeScheduledTask(db.Querier, task.ID, models.ChangeScheduledTaskParams{ - Data: task.Data, + countArtifacts := func() int { + artifacts, err := models.FindArtifacts(db.Querier, models.ArtifactFilters{ + ScheduleID: task.ID, + Status: models.SuccessBackupStatus, + }) + require.NoError(t, err) + return len(artifacts) + } + + deleteArtifacts := func(args mock.Arguments) { + artifacts, err := models.FindArtifacts(db.Querier, models.ArtifactFilters{ + ScheduleID: task.ID, + Status: models.SuccessBackupStatus, + }) + require.NoError(t, err) + require.NotEqual(t, len(artifacts), 0) + + err = models.DeleteArtifact(db.Querier, artifacts[0].ID) + require.NoError(t, err) + } + + changeRetention := func(retention uint32) { + task.Data.MongoDBBackupTask.Retention = retention + task, err = models.ChangeScheduledTask(db.Querier, task.ID, models.ChangeScheduledTaskParams{ + Data: task.Data, + }) + require.NoError(t, err) + } + + createArtifact() + assert.Equal(t, 1, countArtifacts()) + createArtifact() + assert.NoError(t, retentionService.EnforceRetention(task.ID)) + assert.Equal(t, 2, countArtifacts()) + + createArtifact() + createArtifact() + createArtifact() + assert.NoError(t, retentionService.EnforceRetention(task.ID)) + assert.Equal(t, 5, countArtifacts()) + + changeRetention(6) + assert.NoError(t, 
retentionService.EnforceRetention(task.ID)) + assert.Equal(t, 5, countArtifacts()) + + changeRetention(4) + mockedRemovalService.On("DeleteArtifact", mock.Anything, mock.Anything, true).Return(nil).Run(deleteArtifacts).Once() + assert.NoError(t, retentionService.EnforceRetention(task.ID)) + assert.Equal(t, 4, countArtifacts()) + + changeRetention(2) + mockedRemovalService.On("DeleteArtifact", mock.Anything, mock.Anything, true).Return(nil).Run(deleteArtifacts).Twice() + assert.NoError(t, retentionService.EnforceRetention(task.ID)) + assert.Equal(t, 2, countArtifacts()) + }) + + t.Run("pitr", func(t *testing.T) { + task, err := models.CreateScheduledTask(db.Querier, models.CreateScheduledTaskParams{ + CronExpression: "* * * * *", + Type: models.ScheduledMongoDBBackupTask, + Data: &models.ScheduledTaskData{ + MongoDBBackupTask: &models.MongoBackupTaskData{ + CommonBackupTaskData: models.CommonBackupTaskData{ + ServiceID: *agent.ServiceID, + LocationID: locationRes.ID, + Name: "test3", + Retention: 5, + Mode: models.PITR, + }, + }, + }, + Disabled: false, }) require.NoError(t, err) - } - - createArtifact() - assert.Equal(t, 1, countArtifacts()) - createArtifact() - assert.NoError(t, retentionService.EnforceRetention(ctx, task.ID)) - assert.Equal(t, 2, countArtifacts()) - - createArtifact() - createArtifact() - createArtifact() - assert.NoError(t, retentionService.EnforceRetention(ctx, task.ID)) - assert.Equal(t, 5, countArtifacts()) - - changeRetention(6) - assert.NoError(t, retentionService.EnforceRetention(ctx, task.ID)) - assert.Equal(t, 5, countArtifacts()) - - changeRetention(4) - assert.NoError(t, retentionService.EnforceRetention(ctx, task.ID)) - assert.Equal(t, 4, countArtifacts()) - - changeRetention(2) - assert.NoError(t, retentionService.EnforceRetention(ctx, task.ID)) - assert.Equal(t, 2, countArtifacts()) + + t.Run("successful", func(t *testing.T) { + artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: gofakeit.Name(), 
+ Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + ScheduleID: task.ID, + }) + require.NoError(t, err) + + for i := 1; i <= 5; i++ { + _, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{Metadata: &models.Metadata{FileList: []models.File{{Name: "file"}}}}) + require.NoError(t, err) + } + + // Retention equals Metadata list length, no dependency call. + err = retentionService.EnforceRetention(task.ID) + require.NoError(t, err) + + taskData := task.Data + taskData.MongoDBBackupTask.Retention = 3 + _, err = models.ChangeScheduledTask(db.Querier, task.ID, models.ChangeScheduledTaskParams{Data: taskData}) + require.NoError(t, err) + + // Must trim 2 elements from Metadata. + mockedRemovalService.On("TrimPITRArtifact", mock.Anything, artifact.ID, 2).Return(nil).Once() + + err = retentionService.EnforceRetention(task.ID) + require.NoError(t, err) + }) + + t.Run("more than one pitr artifact", func(t *testing.T) { + _, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: gofakeit.Name(), + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + ScheduleID: task.ID, + }) + require.NoError(t, err) + + err = retentionService.EnforceRetention(task.ID) + require.NotNil(t, err) + assert.Equal(t, "Can be only one artifact entity for PITR in the database but found 2", err.Error()) + }) + }) + + mockedRemovalService.AssertExpectations(t) } diff --git a/managed/services/backup/storage.go b/managed/services/backup/storage.go new file mode 100644 index 0000000000..afae1fd4a4 --- /dev/null +++ b/managed/services/backup/storage.go @@ -0,0 +1,31 @@ +// Copyright (C) 2023 Percona LLC +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of 
the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package backup + +import ( + "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/services/minio" +) + +// GetStorageForLocation returns storage client depending on location type. +func GetStorageForLocation(location *models.BackupLocation) Storage { + switch location.Type { + case models.S3BackupLocationType: + return minio.New() + default: + return nil + } +} diff --git a/managed/services/checks/checks.go b/managed/services/checks/checks.go index 64efc13f66..72c915ed30 100644 --- a/managed/services/checks/checks.go +++ b/managed/services/checks/checks.go @@ -263,13 +263,12 @@ func (s *Service) runChecksLoop(ctx context.Context) { // First checks run, start all checks from all groups. 
err := s.runChecksGroup(ctx, "") // start all checks for { - switch err { - case nil: - // nothing, continue - case services.ErrAdvisorsDisabled: - s.l.Info("Advisor checks are not enabled, doing nothing.") - default: - s.l.Error(err) + if err != nil { + if errors.Is(err, services.ErrAdvisorsDisabled) { + s.l.Info("Advisor checks are not enabled, doing nothing.") + } else { + s.l.Error(err) + } } select { diff --git a/managed/services/checks/checks_test.go b/managed/services/checks/checks_test.go index cf12a05856..4e9690f2d2 100644 --- a/managed/services/checks/checks_test.go +++ b/managed/services/checks/checks_test.go @@ -63,6 +63,10 @@ func TestDownloadAdvisors(t *testing.T) { setupClients(t) sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) platformClient, err := platform.NewClient(db, devPlatformAddress) require.NoError(t, err) @@ -152,6 +156,10 @@ func TestLoadLocalChecks(t *testing.T) { func TestCollectAdvisors(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) platformClient, err := platform.NewClient(db, devPlatformAddress) @@ -216,6 +224,10 @@ func TestCollectAdvisors(t *testing.T) { func TestDisableChecks(t *testing.T) { t.Run("normal", func(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) s := New(db, nil, nil, nil, vmClient, clickhouseDB) @@ -241,6 +253,10 @@ func TestDisableChecks(t *testing.T) { t.Run("disable same check twice", func(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) s := New(db, nil, nil, nil, vmClient, clickhouseDB) @@ -269,6 +285,10 
@@ func TestDisableChecks(t *testing.T) { t.Run("disable unknown check", func(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) s := New(db, nil, nil, nil, vmClient, clickhouseDB) @@ -288,6 +308,10 @@ func TestDisableChecks(t *testing.T) { func TestEnableChecks(t *testing.T) { t.Run("normal", func(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) s := New(db, nil, nil, nil, vmClient, clickhouseDB) @@ -319,6 +343,10 @@ func TestChangeInterval(t *testing.T) { var ams mockAlertmanagerService ams.On("SendAlerts", mock.Anything, mock.Anything).Return() sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) s := New(db, nil, nil, &ams, vmClient, clickhouseDB) @@ -359,6 +387,10 @@ func TestChangeInterval(t *testing.T) { func TestGetSecurityCheckResults(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) t.Run("STT enabled", func(t *testing.T) { @@ -387,6 +419,10 @@ func TestGetSecurityCheckResults(t *testing.T) { func TestStartChecks(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) setupClients(t) @@ -582,6 +618,10 @@ func setupClients(t *testing.T) { func TestFindTargets(t *testing.T) { sqlDB := testdb.Open(t, models.SetupFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) s := New(db, nil, nil, nil, vmClient, clickhouseDB) @@ -665,6 +705,10 @@ 
func TestGetFailedChecks(t *testing.T) { t.Parallel() sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, nil) t.Run("no failed check for service", func(t *testing.T) { diff --git a/managed/services/converters.go b/managed/services/converters.go index babc835443..7cf584d9d7 100644 --- a/managed/services/converters.go +++ b/managed/services/converters.go @@ -484,3 +484,40 @@ func SpecifyLogLevel(variant, minLogLevel inventorypb.LogLevel) string { return variant.String() } + +// nodeTypes maps protobuf types to their string types. +var nodeTypes = map[inventorypb.NodeType]models.NodeType{ + inventorypb.NodeType_GENERIC_NODE: models.GenericNodeType, + inventorypb.NodeType_CONTAINER_NODE: models.ContainerNodeType, + inventorypb.NodeType_REMOTE_NODE: models.RemoteNodeType, + inventorypb.NodeType_REMOTE_RDS_NODE: models.RemoteRDSNodeType, + inventorypb.NodeType_REMOTE_AZURE_DATABASE_NODE: models.RemoteAzureDatabaseNodeType, +} + +// ProtoToModelNodeType converts a NodeType from protobuf to model. +func ProtoToModelNodeType(nodeType inventorypb.NodeType) *models.NodeType { + if nodeType == inventorypb.NodeType_NODE_TYPE_INVALID { + return nil + } + result := nodeTypes[nodeType] + return &result +} + +// ServiceTypes maps protobuf types to their string types. +var ServiceTypes = map[inventorypb.ServiceType]models.ServiceType{ + inventorypb.ServiceType_MYSQL_SERVICE: models.MySQLServiceType, + inventorypb.ServiceType_MONGODB_SERVICE: models.MongoDBServiceType, + inventorypb.ServiceType_POSTGRESQL_SERVICE: models.PostgreSQLServiceType, + inventorypb.ServiceType_PROXYSQL_SERVICE: models.ProxySQLServiceType, + inventorypb.ServiceType_HAPROXY_SERVICE: models.HAProxyServiceType, + inventorypb.ServiceType_EXTERNAL_SERVICE: models.ExternalServiceType, +} + +// ProtoToModelServiceType converts a ServiceType from protobuf to model. 
+func ProtoToModelServiceType(serviceType inventorypb.ServiceType) *models.ServiceType { + if serviceType == inventorypb.ServiceType_SERVICE_TYPE_INVALID { + return nil + } + result := ServiceTypes[serviceType] + return &result +} diff --git a/managed/services/dbaas/kubernetes/client/client.go b/managed/services/dbaas/kubernetes/client/client.go index 7a09ff3fae..7f5b127b3d 100644 --- a/managed/services/dbaas/kubernetes/client/client.go +++ b/managed/services/dbaas/kubernetes/client/client.go @@ -417,7 +417,7 @@ func (c *Client) applyObject(helper *resource.Helper, namespace, name string, ob return nil } -func (c *Client) retrieveMetaFromObject(obj runtime.Object) (namespace, name string, err error) { +func (c *Client) retrieveMetaFromObject(obj runtime.Object) (namespace, name string, err error) { //nolint:nonamedreturns name, err = meta.NewAccessor().Name(obj) if err != nil { return diff --git a/managed/services/dbaas/kubernetes/kubernetes.go b/managed/services/dbaas/kubernetes/kubernetes.go index 275b2d2ced..a4d256ecd9 100644 --- a/managed/services/dbaas/kubernetes/kubernetes.go +++ b/managed/services/dbaas/kubernetes/kubernetes.go @@ -496,7 +496,7 @@ func (k *Kubernetes) GetWorkerNodes(ctx context.Context) ([]corev1.Node, error) } // GetAllClusterResources goes through all cluster nodes and sums their allocatable resources. -func (k *Kubernetes) GetAllClusterResources(ctx context.Context, clusterType ClusterType, volumes *corev1.PersistentVolumeList) ( +func (k *Kubernetes) GetAllClusterResources(ctx context.Context, clusterType ClusterType, volumes *corev1.PersistentVolumeList) ( //nolint:nonamedreturns cpuMillis uint64, memoryBytes uint64, diskSizeBytes uint64, err error, ) { nodes, err := k.GetWorkerNodes(ctx) @@ -574,7 +574,7 @@ func (k *Kubernetes) GetAllClusterResources(ctx context.Context, clusterType Clu // getResources extracts resources out of corev1.ResourceList and converts them to int64 values. 
// Millicpus are used for CPU values and bytes for memory. -func getResources(resources corev1.ResourceList) (cpuMillis uint64, memoryBytes uint64, err error) { +func getResources(resources corev1.ResourceList) (cpuMillis uint64, memoryBytes uint64, err error) { //nolint:nonamedreturns cpu, ok := resources[corev1.ResourceCPU] if ok { cpuMillis, err = convertors.StrToMilliCPU(cpu.String()) @@ -594,7 +594,7 @@ func getResources(resources corev1.ResourceList) (cpuMillis uint64, memoryBytes // GetConsumedCPUAndMemory returns consumed CPU and Memory in given namespace. If namespace // is empty, it tries to get them from all namespaces. -func (k *Kubernetes) GetConsumedCPUAndMemory(ctx context.Context, namespace string) ( +func (k *Kubernetes) GetConsumedCPUAndMemory(ctx context.Context, namespace string) ( //nolint:nonamedreturns cpuMillis uint64, memoryBytes uint64, err error, ) { // Get CPU and Memory Requests of Pods' containers. @@ -628,7 +628,7 @@ func (k *Kubernetes) GetConsumedCPUAndMemory(ctx context.Context, namespace stri } // GetConsumedDiskBytes returns consumed bytes. The strategy differs based on k8s cluster type. -func (k *Kubernetes) GetConsumedDiskBytes(ctx context.Context, clusterType ClusterType, volumes *corev1.PersistentVolumeList) (consumedBytes uint64, err error) { //nolint: lll +func (k *Kubernetes) GetConsumedDiskBytes(ctx context.Context, clusterType ClusterType, volumes *corev1.PersistentVolumeList) (consumedBytes uint64, err error) { //nolint:lll,nonamedreturns switch clusterType { case ClusterTypeUnknown: return 0, errors.Errorf("unknown cluster type") @@ -677,7 +677,7 @@ func (k *Kubernetes) GetConsumedDiskBytes(ctx context.Context, clusterType Clust } // sumVolumesSize returns sum of persistent volumes storage size in bytes. 
-func sumVolumesSize(pvs *corev1.PersistentVolumeList) (sum uint64, err error) { +func sumVolumesSize(pvs *corev1.PersistentVolumeList) (sum uint64, err error) { //nolint:nonamedreturns for _, pv := range pvs.Items { bytes, err := convertors.StrToBytes(pv.Spec.Capacity.Storage().String()) if err != nil { @@ -766,7 +766,7 @@ func (k *Kubernetes) InstallOLMOperator(ctx context.Context) error { log.Printf("Waiting for subscription/%s to install CSV", subscriptionKey.Name) csvKey, err := k.client.GetSubscriptionCSV(ctx, subscriptionKey) if err != nil { - return fmt.Errorf("subscription/%s failed to install CSV: %v", subscriptionKey.Name, err) + return fmt.Errorf("subscription/%s failed to install CSV: %w", subscriptionKey.Name, err) } log.Printf("Waiting for clusterserviceversion/%s to reach 'Succeeded' phase", csvKey.Name) if err := k.client.DoCSVWait(ctx, csvKey); err != nil { @@ -781,12 +781,14 @@ func (k *Kubernetes) InstallOLMOperator(ctx context.Context) error { return nil } -func decodeResources(f []byte) (objs []unstructured.Unstructured, err error) { +func decodeResources(f []byte) ([]unstructured.Unstructured, error) { dec := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(f), 8) + var objs []unstructured.Unstructured + for { var u unstructured.Unstructured - err = dec.Decode(&u) - if err == io.EOF { + err := dec.Decode(&u) + if errors.Is(err, io.EOF) { break } else if err != nil { return nil, err @@ -797,8 +799,9 @@ func decodeResources(f []byte) (objs []unstructured.Unstructured, err error) { return objs, nil } -func filterResources(resources []unstructured.Unstructured, filter func(unstructured. 
- Unstructured) bool, +func filterResources( //nolint:nonamedreturns + resources []unstructured.Unstructured, + filter func(unstructured.Unstructured) bool, ) (filtered []unstructured.Unstructured) { for _, r := range resources { if filter(r) { @@ -967,6 +970,7 @@ func (k *Kubernetes) ListTemplates(ctx context.Context, engine, namespace string } for _, templateCR := range templateCRs.Items { + //nolint:forcetypeassert templates = append(templates, &dbaasv1beta1.Template{ Name: templateCR.Object["metadata"].(map[string]interface{})["name"].(string), Kind: templateCR.Object["kind"].(string), diff --git a/managed/services/dbaas/kubernetes/types.go b/managed/services/dbaas/kubernetes/types.go index 9c3174caa6..f850966bf4 100644 --- a/managed/services/dbaas/kubernetes/types.go +++ b/managed/services/dbaas/kubernetes/types.go @@ -608,6 +608,7 @@ func UpdatePatchForPSMDB(dbCluster *dbaasv1.DatabaseCluster, updateRequest *dbaa if updateRequest.Params.Image != "" { dbCluster.Spec.DatabaseImage = updateRequest.Params.Image } + //nolint:nestif if updateRequest.Params.Replicaset != nil { if updateRequest.Params.Replicaset.ComputeResources != nil { if updateRequest.Params.Replicaset.ComputeResources.CpuM > 0 { diff --git a/managed/services/grafana/client.go b/managed/services/grafana/client.go index 5a5527c284..4287f6942e 100644 --- a/managed/services/grafana/client.go +++ b/managed/services/grafana/client.go @@ -342,7 +342,7 @@ func (c *Client) testCreateUser(ctx context.Context, login string, role role, au if err = c.do(ctx, "POST", "/api/admin/users", "", authHeaders, b, &m); err != nil { return 0, err } - userID := int(m["id"].(float64)) + userID := int(m["id"].(float64)) //nolint:forcetypeassert // settings in grafana.ini should make a viewer by default if role < editor { @@ -449,7 +449,7 @@ func (c *Client) CreateAlertRule(ctx context.Context, folderName, groupName stri if err := c.do(ctx, "POST", fmt.Sprintf("/api/ruler/grafana/api/v1/rules/%s", folderName), "", 
authHeaders, body, nil); err != nil { if err != nil { - if cErr, ok := errors.Cause(err).(*clientError); ok { + if cErr, ok := errors.Cause(err).(*clientError); ok { //nolint:errorlint return status.Error(codes.InvalidArgument, cErr.ErrorMessage) } return err @@ -559,7 +559,7 @@ func (c *Client) createAPIKey(ctx context.Context, name string, role role, authH if err = c.do(ctx, "POST", "/api/auth/keys", "", authHeaders, b, &m); err != nil { return 0, "", err } - key := m["key"].(string) + key := m["key"].(string) //nolint:forcetypeassert apiAuthHeaders := http.Header{} apiAuthHeaders.Set("Authorization", fmt.Sprintf("Bearer %s", key)) diff --git a/managed/services/grafana/client_test.go b/managed/services/grafana/client_test.go index df536799a0..546dbcaa7f 100644 --- a/managed/services/grafana/client_test.go +++ b/managed/services/grafana/client_test.go @@ -58,7 +58,7 @@ func TestClient(t *testing.T) { u, err := c.getAuthUser(ctx, nil) role := u.role - clientError, _ := errors.Cause(err).(*clientError) + clientError, _ := errors.Cause(err).(*clientError) //nolint:errorlint require.NotNil(t, clientError, "got role %s", role) assert.Equal(t, 401, clientError.Code) diff --git a/managed/services/inventory/agents.go b/managed/services/inventory/agents.go index c5a3beee52..82b566549e 100644 --- a/managed/services/inventory/agents.go +++ b/managed/services/inventory/agents.go @@ -59,7 +59,7 @@ func toInventoryAgent(q *reform.Querier, row *models.Agent, registry agentsRegis } if row.AgentType == models.PMMAgentType { - agent.(*inventorypb.PMMAgent).Connected = registry.IsConnected(row.AgentID) + agent.(*inventorypb.PMMAgent).Connected = registry.IsConnected(row.AgentID) //nolint:forcetypeassert } return agent, nil } @@ -193,7 +193,7 @@ func (as *AgentsService) AddPMMAgent(ctx context.Context, req *inventorypb.AddPM if err != nil { return err } - res = agent.(*inventorypb.PMMAgent) + res = agent.(*inventorypb.PMMAgent) //nolint:forcetypeassert return nil }) return res, e @@ 
-213,7 +213,7 @@ func (as *AgentsService) AddNodeExporter(ctx context.Context, req *inventorypb.A if err != nil { return err } - res = agent.(*inventorypb.NodeExporter) + res = agent.(*inventorypb.NodeExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -231,7 +231,7 @@ func (as *AgentsService) ChangeNodeExporter(ctx context.Context, req *inventoryp return nil, err } - res := agent.(*inventorypb.NodeExporter) + res := agent.(*inventorypb.NodeExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -276,7 +276,7 @@ func (as *AgentsService) AddMySQLdExporter(ctx context.Context, req *inventorypb if err != nil { return err } - res = agent.(*inventorypb.MySQLdExporter) + res = agent.(*inventorypb.MySQLdExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -294,7 +294,7 @@ func (as *AgentsService) ChangeMySQLdExporter(ctx context.Context, req *inventor return nil, err } - res := agent.(*inventorypb.MySQLdExporter) + res := agent.(*inventorypb.MySQLdExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -336,7 +336,7 @@ func (as *AgentsService) AddMongoDBExporter(ctx context.Context, req *inventoryp if err != nil { return err } - res = agent.(*inventorypb.MongoDBExporter) + res = agent.(*inventorypb.MongoDBExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -354,7 +354,7 @@ func (as *AgentsService) ChangeMongoDBExporter(ctx context.Context, req *invento return nil, err } - res := agent.(*inventorypb.MongoDBExporter) + res := agent.(*inventorypb.MongoDBExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -397,7 +397,7 @@ func (as *AgentsService) AddQANMySQLPerfSchemaAgent(ctx context.Context, req *in if err != nil { return err } - res = agent.(*inventorypb.QANMySQLPerfSchemaAgent) + res = agent.(*inventorypb.QANMySQLPerfSchemaAgent) //nolint:forcetypeassert return nil }) if e != nil { 
@@ -415,7 +415,7 @@ func (as *AgentsService) ChangeQANMySQLPerfSchemaAgent(ctx context.Context, req return nil, err } - res := agent.(*inventorypb.QANMySQLPerfSchemaAgent) + res := agent.(*inventorypb.QANMySQLPerfSchemaAgent) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -463,7 +463,7 @@ func (as *AgentsService) AddQANMySQLSlowlogAgent(ctx context.Context, req *inven if err != nil { return err } - res = agent.(*inventorypb.QANMySQLSlowlogAgent) + res = agent.(*inventorypb.QANMySQLSlowlogAgent) //nolint:forcetypeassert return nil }) if e != nil { @@ -481,7 +481,7 @@ func (as *AgentsService) ChangeQANMySQLSlowlogAgent(ctx context.Context, req *in return nil, err } - res := agent.(*inventorypb.QANMySQLSlowlogAgent) + res := agent.(*inventorypb.QANMySQLSlowlogAgent) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -523,7 +523,7 @@ func (as *AgentsService) AddPostgresExporter(ctx context.Context, req *inventory if err != nil { return err } - res = agent.(*inventorypb.PostgresExporter) + res = agent.(*inventorypb.PostgresExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -541,7 +541,7 @@ func (as *AgentsService) ChangePostgresExporter(ctx context.Context, req *invent return nil, err } - res := agent.(*inventorypb.PostgresExporter) + res := agent.(*inventorypb.PostgresExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -585,7 +585,7 @@ func (as *AgentsService) AddQANMongoDBProfilerAgent(ctx context.Context, req *in if err != nil { return err } - res = agent.(*inventorypb.QANMongoDBProfilerAgent) + res = agent.(*inventorypb.QANMongoDBProfilerAgent) //nolint:forcetypeassert return nil }) if e != nil { @@ -605,7 +605,7 @@ func (as *AgentsService) ChangeQANMongoDBProfilerAgent(ctx context.Context, req return nil, err } - res := agent.(*inventorypb.QANMongoDBProfilerAgent) + res := 
agent.(*inventorypb.QANMongoDBProfilerAgent) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -646,7 +646,7 @@ func (as *AgentsService) AddProxySQLExporter(ctx context.Context, req *inventory if err != nil { return err } - res = agent.(*inventorypb.ProxySQLExporter) + res = agent.(*inventorypb.ProxySQLExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -664,7 +664,7 @@ func (as *AgentsService) ChangeProxySQLExporter(ctx context.Context, req *invent return nil, err } - res := agent.(*inventorypb.ProxySQLExporter) + res := agent.(*inventorypb.ProxySQLExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -706,7 +706,7 @@ func (as *AgentsService) AddQANPostgreSQLPgStatementsAgent(ctx context.Context, if err != nil { return err } - res = agent.(*inventorypb.QANPostgreSQLPgStatementsAgent) + res = agent.(*inventorypb.QANPostgreSQLPgStatementsAgent) //nolint:forcetypeassert return nil }) if e != nil { @@ -724,7 +724,7 @@ func (as *AgentsService) ChangeQANPostgreSQLPgStatementsAgent(ctx context.Contex return nil, err } - res := agent.(*inventorypb.QANPostgreSQLPgStatementsAgent) + res := agent.(*inventorypb.QANPostgreSQLPgStatementsAgent) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -767,7 +767,7 @@ func (as *AgentsService) AddQANPostgreSQLPgStatMonitorAgent(ctx context.Context, if err != nil { return err } - res = agent.(*inventorypb.QANPostgreSQLPgStatMonitorAgent) + res = agent.(*inventorypb.QANPostgreSQLPgStatMonitorAgent) //nolint:forcetypeassert return nil }) if e != nil { @@ -785,7 +785,7 @@ func (as *AgentsService) ChangeQANPostgreSQLPgStatMonitorAgent(ctx context.Conte return nil, err } - res := agent.(*inventorypb.QANPostgreSQLPgStatMonitorAgent) + res := agent.(*inventorypb.QANPostgreSQLPgStatMonitorAgent) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ 
-819,7 +819,7 @@ func (as *AgentsService) AddRDSExporter(ctx context.Context, req *inventorypb.Ad if err != nil { return err } - res = agent.(*inventorypb.RDSExporter) + res = agent.(*inventorypb.RDSExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -837,7 +837,7 @@ func (as *AgentsService) ChangeRDSExporter(ctx context.Context, req *inventorypb return nil, err } - res := agent.(*inventorypb.RDSExporter) + res := agent.(*inventorypb.RDSExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } @@ -869,7 +869,7 @@ func (as *AgentsService) AddExternalExporter(ctx context.Context, req *inventory if err != nil { return err } - res = agent.(*inventorypb.ExternalExporter) + res = agent.(*inventorypb.ExternalExporter) //nolint:forcetypeassert PMMAgentID = row.PMMAgentID return nil }) @@ -897,7 +897,7 @@ func (as *AgentsService) ChangeExternalExporter(req *inventorypb.ChangeExternalE // It's required to regenerate victoriametrics config file. 
as.vmdb.RequestConfigurationUpdate() - res := agent.(*inventorypb.ExternalExporter) + res := agent.(*inventorypb.ExternalExporter) //nolint:forcetypeassert return res, nil } @@ -923,7 +923,7 @@ func (as *AgentsService) AddAzureDatabaseExporter(ctx context.Context, req *inve if err != nil { return err } - res = agent.(*inventorypb.AzureDatabaseExporter) + res = agent.(*inventorypb.AzureDatabaseExporter) //nolint:forcetypeassert return nil }) if e != nil { @@ -944,7 +944,7 @@ func (as *AgentsService) ChangeAzureDatabaseExporter( return nil, err } - res := agent.(*inventorypb.AzureDatabaseExporter) + res := agent.(*inventorypb.AzureDatabaseExporter) //nolint:forcetypeassert as.state.RequestStateUpdate(ctx, res.PmmAgentId) return res, nil } diff --git a/managed/services/inventory/nodes.go b/managed/services/inventory/nodes.go index 7dfb3b2e79..c6a2f7eb69 100644 --- a/managed/services/inventory/nodes.go +++ b/managed/services/inventory/nodes.go @@ -127,7 +127,7 @@ func (s *NodesService) AddGenericNode(ctx context.Context, req *inventorypb.AddG return nil, err } - return invNode.(*inventorypb.GenericNode), nil + return invNode.(*inventorypb.GenericNode), nil //nolint:forcetypeassert } // AddContainerNode adds Container Node. @@ -164,7 +164,7 @@ func (s *NodesService) AddContainerNode(ctx context.Context, req *inventorypb.Ad return nil, err } - return invNode.(*inventorypb.ContainerNode), nil + return invNode.(*inventorypb.ContainerNode), nil //nolint:forcetypeassert } // AddRemoteNode adds Remote Node. 
@@ -198,7 +198,7 @@ func (s *NodesService) AddRemoteNode(ctx context.Context, req *inventorypb.AddRe return nil, err } - return invNode.(*inventorypb.RemoteNode), nil + return invNode.(*inventorypb.RemoteNode), nil //nolint:forcetypeassert } // AddRemoteRDSNode adds a new RDS node @@ -232,7 +232,7 @@ func (s *NodesService) AddRemoteRDSNode(ctx context.Context, req *inventorypb.Ad return nil, err } - return invNode.(*inventorypb.RemoteRDSNode), nil + return invNode.(*inventorypb.RemoteRDSNode), nil //nolint:forcetypeassert } // AddRemoteAzureDatabaseNode adds a new Azure database node @@ -266,7 +266,7 @@ func (s *NodesService) AddRemoteAzureDatabaseNode(ctx context.Context, req *inve return nil, err } - return invNode.(*inventorypb.RemoteAzureDatabaseNode), nil + return invNode.(*inventorypb.RemoteAzureDatabaseNode), nil //nolint:forcetypeassert } // Remove removes Node without any Agents and Services. diff --git a/managed/services/inventory/services.go b/managed/services/inventory/services.go index c00502d115..d9d1c65aa4 100644 --- a/managed/services/inventory/services.go +++ b/managed/services/inventory/services.go @@ -155,7 +155,7 @@ func (ss *ServicesService) AddMySQL(ctx context.Context, params *models.AddDBMSS ss.vc.RequestSoftwareVersionsUpdate() - return res.(*inventorypb.MySQLService), nil + return res.(*inventorypb.MySQLService), nil //nolint:forcetypeassert } // AddMongoDB inserts MongoDB Service with given parameters. @@ -179,7 +179,7 @@ func (ss *ServicesService) AddMongoDB(ctx context.Context, params *models.AddDBM if err != nil { return nil, err } - return res.(*inventorypb.MongoDBService), nil + return res.(*inventorypb.MongoDBService), nil //nolint:forcetypeassert } // AddPostgreSQL inserts PostgreSQL Service with given parameters. 
@@ -203,7 +203,7 @@ func (ss *ServicesService) AddPostgreSQL(ctx context.Context, params *models.Add if err != nil { return nil, err } - return res.(*inventorypb.PostgreSQLService), nil + return res.(*inventorypb.PostgreSQLService), nil //nolint:forcetypeassert } // AddProxySQL inserts ProxySQL Service with given parameters. @@ -224,7 +224,7 @@ func (ss *ServicesService) AddProxySQL(ctx context.Context, params *models.AddDB if err != nil { return nil, err } - return res.(*inventorypb.ProxySQLService), nil + return res.(*inventorypb.ProxySQLService), nil //nolint:forcetypeassert } // AddHAProxyService inserts HAProxy Service with given parameters. @@ -246,7 +246,7 @@ func (ss *ServicesService) AddHAProxyService(_ context.Context, params *models.A if err != nil { return nil, err } - return res.(*inventorypb.HAProxyService), nil + return res.(*inventorypb.HAProxyService), nil //nolint:forcetypeassert } // AddExternalService inserts External Service with given parameters. @@ -270,7 +270,7 @@ func (ss *ServicesService) AddExternalService(ctx context.Context, params *model if err != nil { return nil, err } - return res.(*inventorypb.ExternalService), nil + return res.(*inventorypb.ExternalService), nil //nolint:forcetypeassert } // Remove removes Service without any Agents. diff --git a/managed/services/management/agent.go b/managed/services/management/agent.go index b075d64619..79111cd64b 100644 --- a/managed/services/management/agent.go +++ b/managed/services/management/agent.go @@ -19,6 +19,8 @@ import ( "context" "github.com/AlekSi/pointer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/reform.v1" @@ -43,16 +45,33 @@ func NewAgentService(db *reform.DB, r agentsRegistry) *AgentService { } // ListAgents returns a filtered list of Agents. 
-// -//nolint:unparam func (s *AgentService) ListAgents(ctx context.Context, req *agentv1beta1.ListAgentRequest) (*agentv1beta1.ListAgentResponse, error) { - serviceID := req.ServiceId + var err error + err = s.validateListAgentRequest(req) + if err != nil { + return nil, err + } + + var agents []*agentv1beta1.UniversalAgent + if req.ServiceId != "" { + agents, err = s.listAgentsByServiceID(ctx, req.ServiceId) + } else { + agents, err = s.listAgentsByNodeID(req.NodeId) + } + if err != nil { + return nil, err + } + + return &agentv1beta1.ListAgentResponse{Agents: agents}, nil +} + +// listAgentsByServiceID returns a list of Agents filtered by ServiceID. +func (s *AgentService) listAgentsByServiceID(ctx context.Context, serviceID string) ([]*agentv1beta1.UniversalAgent, error) { var agents []*models.Agent var service *models.Service - // TODO: provide a higher level of data consistency guarantee by using a locking mechanism. - errTX := s.db.InTransaction(func(tx *reform.TX) error { + errTX := s.db.InTransactionContext(ctx, nil, func(tx *reform.TX) error { var err error agents, err = models.FindAgents(tx.Querier, models.AgentFilters{}) @@ -72,7 +91,7 @@ func (s *AgentService) ListAgents(ctx context.Context, req *agentv1beta1.ListAge return nil, errTX } - var svcAgents []*agentv1beta1.UniversalAgent + var res []*agentv1beta1.UniversalAgent for _, agent := range agents { if IsNodeAgent(agent, service) || IsVMAgent(agent, service) || IsServiceAgent(agent, service) { @@ -80,11 +99,33 @@ func (s *AgentService) ListAgents(ctx context.Context, req *agentv1beta1.ListAge if err != nil { return nil, err } - svcAgents = append(svcAgents, ag) + res = append(res, ag) } } - return &agentv1beta1.ListAgentResponse{Agents: svcAgents}, nil + return res, nil +} + +// listAgentsByNodeID returns a list of Agents filtered by NodeID. 
+func (s *AgentService) listAgentsByNodeID(nodeID string) ([]*agentv1beta1.UniversalAgent, error) { + agents, err := models.FindAgents(s.db.Querier, models.AgentFilters{}) + if err != nil { + return nil, err + } + + var res []*agentv1beta1.UniversalAgent + + for _, agent := range agents { + if pointer.GetString(agent.NodeID) == nodeID || pointer.GetString(agent.RunsOnNodeID) == nodeID { + ag, err := s.agentToAPI(agent) + if err != nil { + return nil, err + } + res = append(res, ag) + } + } + + return res, nil } func (s *AgentService) agentToAPI(agent *models.Agent) (*agentv1beta1.UniversalAgent, error) { @@ -171,3 +212,15 @@ func (s *AgentService) agentToAPI(agent *models.Agent) (*agentv1beta1.UniversalA return ua, nil } + +func (s *AgentService) validateListAgentRequest(req *agentv1beta1.ListAgentRequest) error { + if req.ServiceId == "" && req.NodeId == "" { + return status.Error(codes.InvalidArgument, "Either service_id or node_id is expected.") + } + + if req.ServiceId != "" && req.NodeId != "" { + return status.Error(codes.InvalidArgument, "Either service_id or node_id is expected, not both.") + } + + return nil +} diff --git a/managed/services/management/agent_test.go b/managed/services/management/agent_test.go index d2ebfcc1e1..61fcde9b95 100644 --- a/managed/services/management/agent_test.go +++ b/managed/services/management/agent_test.go @@ -18,11 +18,15 @@ package management import ( "context" "testing" + "time" "github.com/AlekSi/pointer" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/reform.v1" "gopkg.in/reform.v1/dialects/postgresql" @@ -33,18 +37,23 @@ import ( "github.com/percona/pmm/managed/utils/tests" ) -func setup(t *testing.T) (context.Context, *AgentService, func(t *testing.T), *mockPrometheusService) { //nolint:unparam +var now time.Time + +func setup(t 
*testing.T) (context.Context, *AgentService, func(t *testing.T)) { t.Helper() + now = models.Now() + origNowF := models.Now + models.Now = func() time.Time { + return now + } + ctx := logger.Set(context.Background(), t.Name()) uuid.SetRand(&tests.IDReader{}) sqlDB := testdb.Open(t, models.SetupFixtures, nil) db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) - vmdb := &mockPrometheusService{} - vmdb.Test(t) - state := &mockAgentsStateUpdater{} state.Test(t) @@ -52,27 +61,45 @@ func setup(t *testing.T) (context.Context, *AgentService, func(t *testing.T), *m ar.Test(t) teardown := func(t *testing.T) { + models.Now = origNowF uuid.SetRand(nil) require.NoError(t, sqlDB.Close()) - vmdb.AssertExpectations(t) state.AssertExpectations(t) ar.AssertExpectations(t) } s := NewAgentService(db, ar) - return ctx, s, teardown, vmdb + return ctx, s, teardown } func TestAgentService(t *testing.T) { - t.Run("List of agents", func(t *testing.T) { + t.Run("Should return a validation error when no params passed", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer teardown(t) + + response, err := s.ListAgents(ctx, &agentv1beta1.ListAgentRequest{}) + assert.Nil(t, response) + tests.AssertGRPCError(t, status.New(codes.InvalidArgument, "Either service_id or node_id is expected."), err) + }) + + t.Run("Should return a validation error when both params passed", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer teardown(t) + + response, err := s.ListAgents(ctx, &agentv1beta1.ListAgentRequest{ServiceId: "foo-id", NodeId: "bar-id"}) + assert.Nil(t, response) + tests.AssertGRPCError(t, status.New(codes.InvalidArgument, "Either service_id or node_id is expected, not both."), err) + }) + + t.Run("ListAgents", func(t *testing.T) { const ( pgExporterID = "/agent_id/00000000-0000-4000-8000-000000000003" pgStatStatementID = "/agent_id/00000000-0000-4000-8000-000000000004" ) t.Run("should output a list of agents provisioned by default", func(t *testing.T) { 
- ctx, s, teardown, _ := setup(t) + ctx, s, teardown := setup(t) defer teardown(t) services, err := models.FindServices(s.db.Querier, models.ServiceFilters{ @@ -89,13 +116,58 @@ func TestAgentService(t *testing.T) { response, err := s.ListAgents(ctx, &agentv1beta1.ListAgentRequest{ ServiceId: service.ServiceID, }) - require.NoError(t, err) - assert.Len(t, response.Agents, 3) // 2 exporters + 1 agent + + expected := []*agentv1beta1.UniversalAgent{ + { + AgentId: pgExporterID, + AgentType: "postgres_exporter", + PmmAgentId: models.PMMServerAgentID, + IsConnected: false, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + Username: "postgres", + PostgresqlOptions: &agentv1beta1.UniversalAgent_PostgreSQLOptions{ + SslCa: "", + SslCert: "", + IsSslKeySet: false, + }, + ServiceId: "/service_id/00000000-0000-4000-8000-000000000002", + Status: "UNKNOWN", + Tls: true, + }, + { + AgentId: pgStatStatementID, + AgentType: "qan-postgresql-pgstatements-agent", + PmmAgentId: models.PMMServerAgentID, + IsConnected: false, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + Username: "postgres", + PostgresqlOptions: &agentv1beta1.UniversalAgent_PostgreSQLOptions{ + SslCa: "", + SslCert: "", + IsSslKeySet: false, + }, + ServiceId: "/service_id/00000000-0000-4000-8000-000000000002", + Status: "UNKNOWN", + Tls: true, + }, + { + AgentId: models.PMMServerAgentID, + AgentType: "pmm-agent", + RunsOnNodeId: models.PMMServerAgentID, + IsConnected: true, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + }, + } + + assert.Equal(t, expected, response.Agents) }) t.Run("should output a list of agents provisioned for RDS service", func(t *testing.T) { - ctx, s, teardown, _ := setup(t) + ctx, s, teardown := setup(t) defer teardown(t) node, err := models.CreateNode(s.db.Querier, models.RemoteRDSNodeType, &models.CreateNodeParams{ @@ -122,18 +194,30 @@ func TestAgentService(t *testing.T) { }) require.NoError(t, err) - 
s.r.(*mockAgentsRegistry).On("IsConnected", rdsExporter.AgentID).Return(false).Once() // RDS exporter + s.r.(*mockAgentsRegistry).On("IsConnected", rdsExporter.AgentID).Return(false).Once() response, err := s.ListAgents(ctx, &agentv1beta1.ListAgentRequest{ ServiceId: service.ServiceID, }) - require.NoError(t, err) - assert.Len(t, response.Agents, 1) + + expected := []*agentv1beta1.UniversalAgent{ + { + AgentId: rdsExporter.AgentID, + AgentType: "rds_exporter", + PmmAgentId: "/agent_id/00000000-0000-4000-8000-000000000007", + IsConnected: false, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + ServiceId: "/service_id/00000000-0000-4000-8000-000000000006", + Status: "UNKNOWN", + }, + } + assert.Equal(t, expected, response.Agents) }) t.Run("should output a list of agents provisioned for Azure service", func(t *testing.T) { - ctx, s, teardown, _ := setup(t) + ctx, s, teardown := setup(t) defer teardown(t) node, err := models.CreateNode(s.db.Querier, models.RemoteAzureDatabaseNodeType, &models.CreateNodeParams{ @@ -160,14 +244,26 @@ func TestAgentService(t *testing.T) { }) require.NoError(t, err) - s.r.(*mockAgentsRegistry).On("IsConnected", azureExporter.AgentID).Return(false).Once() // Azure exporter + s.r.(*mockAgentsRegistry).On("IsConnected", azureExporter.AgentID).Return(false).Once() response, err := s.ListAgents(ctx, &agentv1beta1.ListAgentRequest{ ServiceId: service.ServiceID, }) - require.NoError(t, err) - assert.Len(t, response.Agents, 1) + + expected := []*agentv1beta1.UniversalAgent{ + { + AgentId: azureExporter.AgentID, + AgentType: "azure_database_exporter", + PmmAgentId: "/agent_id/00000000-0000-4000-8000-000000000007", + IsConnected: false, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + ServiceId: "/service_id/00000000-0000-4000-8000-000000000006", + Status: "UNKNOWN", + }, + } + assert.Equal(t, expected, response.Agents) }) }) } diff --git a/managed/services/management/backup/artifacts_service.go 
b/managed/services/management/backup/artifacts_service.go index d16ee37a0b..e278f6dc34 100644 --- a/managed/services/management/backup/artifacts_service.go +++ b/managed/services/management/backup/artifacts_service.go @@ -29,25 +29,26 @@ import ( backuppb "github.com/percona/pmm/api/managementpb/backup" "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/services/backup" ) // ArtifactsService represents artifacts API. type ArtifactsService struct { - l *logrus.Entry - db *reform.DB - removalSVC removalService - pitrTimerangeSVC pitrTimerangeService + l *logrus.Entry + db *reform.DB + removalSVC removalService + pbmPITRService pbmPITRService backuppb.UnimplementedArtifactsServer } // NewArtifactsService creates new artifacts API service. -func NewArtifactsService(db *reform.DB, removalSVC removalService, storage pitrTimerangeService) *ArtifactsService { +func NewArtifactsService(db *reform.DB, removalSVC removalService, pbmPITRService pbmPITRService) *ArtifactsService { return &ArtifactsService{ - l: logrus.WithField("component", "management/backup/artifacts"), - db: db, - removalSVC: removalSVC, - pitrTimerangeSVC: storage, + l: logrus.WithField("component", "management/backup/artifacts"), + db: db, + removalSVC: removalSVC, + pbmPITRService: pbmPITRService, } } @@ -104,15 +105,26 @@ func (s *ArtifactsService) ListArtifacts(context.Context, *backuppb.ListArtifact }, nil } -// DeleteArtifact deletes specified artifact. +// DeleteArtifact deletes specified artifact and its files. 
func (s *ArtifactsService) DeleteArtifact( ctx context.Context, req *backuppb.DeleteArtifactRequest, ) (*backuppb.DeleteArtifactResponse, error) { - if err := s.removalSVC.DeleteArtifact(ctx, req.ArtifactId, req.RemoveFiles); err != nil { + artifact, err := models.FindArtifactByID(s.db.Querier, req.ArtifactId) + if err != nil { + return nil, err + } + + location, err := models.FindBackupLocationByID(s.db.Querier, artifact.LocationID) + if err != nil { return nil, err } + storage := backup.GetStorageForLocation(location) + + if err := s.removalSVC.DeleteArtifact(storage, req.ArtifactId, req.RemoveFiles); err != nil { + return nil, err + } return &backuppb.DeleteArtifactResponse{}, nil } @@ -133,7 +145,11 @@ func (s *ArtifactsService) ListPitrTimeranges( } if artifact.Mode != models.PITR { - return nil, status.Errorf(codes.FailedPrecondition, "Artifact is not a PITR artifact") + return nil, status.Errorf(codes.FailedPrecondition, "Artifact is not a PITR artifact.") + } + + if artifact.IsShardedCluster { + return nil, status.Errorf(codes.FailedPrecondition, "Getting PITR timeranges is not supported for sharded cluster artifacts.") } location, err := models.FindBackupLocationByID(s.db.Querier, artifact.LocationID) @@ -141,7 +157,9 @@ func (s *ArtifactsService) ListPitrTimeranges( return nil, err } - timelines, err := s.pitrTimerangeSVC.ListPITRTimeranges(ctx, artifact.Name, location) + storage := backup.GetStorageForLocation(location) + + timelines, err := s.pbmPITRService.ListPITRTimeranges(ctx, storage, location, artifact) if err != nil { return nil, err } @@ -184,6 +202,8 @@ func convertBackupStatus(status models.BackupStatus) (backuppb.BackupStatus, err return backuppb.BackupStatus_BACKUP_STATUS_DELETING, nil case models.FailedToDeleteBackupStatus: return backuppb.BackupStatus_BACKUP_STATUS_FAILED_TO_DELETE, nil + case models.CleanupInProgressStatus: + return backuppb.BackupStatus_BACKUP_STATUS_CLEANUP_IN_PROGRESS, nil default: return 0, errors.Errorf("invalid 
status '%s'", status) } @@ -226,20 +246,52 @@ func convertArtifact( } return &backuppb.Artifact{ - ArtifactId: a.ID, - Name: a.Name, - Vendor: a.Vendor, - LocationId: a.LocationID, - LocationName: l.Name, - ServiceId: a.ServiceID, - ServiceName: serviceName, - DataModel: dataModel, - Mode: backupMode, - Status: backupStatus, - CreatedAt: createdAt, + ArtifactId: a.ID, + Name: a.Name, + Vendor: a.Vendor, + LocationId: a.LocationID, + LocationName: l.Name, + ServiceId: a.ServiceID, + ServiceName: serviceName, + DataModel: dataModel, + Mode: backupMode, + Status: backupStatus, + CreatedAt: createdAt, + IsShardedCluster: a.IsShardedCluster, + Folder: a.Folder, + MetadataList: artifactMetadataListToProto(a), }, nil } +// artifactMetadataListToProto returns artifact metadata list in protobuf format. +func artifactMetadataListToProto(artifact *models.Artifact) []*backuppb.Metadata { + res := make([]*backuppb.Metadata, len(artifact.MetadataList)) + for i, metadata := range artifact.MetadataList { + res[i] = &backuppb.Metadata{} + res[i].FileList = make([]*backuppb.File, len(metadata.FileList)) + + for j, file := range metadata.FileList { + res[i].FileList[j] = &backuppb.File{ + Name: file.Name, + IsDirectory: file.IsDirectory, + } + } + + if metadata.RestoreTo != nil { + res[i].RestoreTo = timestamppb.New(*metadata.RestoreTo) + } + + if metadata.BackupToolData != nil { + if metadata.BackupToolData.PbmMetadata != nil { + res[i].BackupToolMetadata = &backuppb.Metadata_PbmMetadata{ + PbmMetadata: &backuppb.PbmMetadata{Name: metadata.BackupToolData.PbmMetadata.Name}, + } + } + } + } + return res +} + // Check interfaces. 
var ( _ backuppb.ArtifactsServer = (*ArtifactsService)(nil) diff --git a/managed/services/management/backup/artifacts_service_test.go b/managed/services/management/backup/artifacts_service_test.go index 94f57e78b9..9dddfd29ae 100644 --- a/managed/services/management/backup/artifacts_service_test.go +++ b/managed/services/management/backup/artifacts_service_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/reform.v1" "gopkg.in/reform.v1/dialects/postgresql" @@ -38,12 +39,16 @@ import ( "github.com/percona/pmm/managed/utils/tests" ) -func TestListPitrTimelines(t *testing.T) { +func TestListPitrTimeranges(t *testing.T) { ctx := context.Background() sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) - mockedPitrStorageSvc := &mockPitrTimerangeService{} + mockedPbmPITRService := &mockPbmPITRService{} timelines := []backup.Timeline{ { @@ -53,8 +58,8 @@ func TestListPitrTimelines(t *testing.T) { }, } - mockedPitrStorageSvc.On("ListPITRTimeranges", ctx, mock.Anything, mock.Anything).Return(timelines, nil) - artifactsService := NewArtifactsService(db, nil, mockedPitrStorageSvc) + mockedPbmPITRService.On("ListPITRTimeranges", ctx, mock.Anything, mock.Anything, mock.Anything).Return(timelines, nil) + artifactsService := NewArtifactsService(db, nil, mockedPbmPITRService) var locationID string params := models.CreateBackupLocationParams{ @@ -120,8 +125,86 @@ func TestListPitrTimelines(t *testing.T) { response, err := artifactsService.ListPitrTimeranges(ctx, &backuppb.ListPitrTimerangesRequest{ ArtifactId: artifact.ID, }) - tests.AssertGRPCError(t, status.New(codes.FailedPrecondition, "Artifact is not a PITR artifact"), err) + tests.AssertGRPCError(t, status.New(codes.FailedPrecondition, 
"Artifact is not a PITR artifact."), err) assert.Nil(t, response) }) - mock.AssertExpectationsForObjects(t, mockedPitrStorageSvc) + mock.AssertExpectationsForObjects(t, mockedPbmPITRService) +} + +func TestArtifactMetadataListToProto(t *testing.T) { + sqlDB := testdb.Open(t, models.SkipFixtures, nil) + t.Cleanup(func() { + require.NoError(t, sqlDB.Close()) + }) + + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + + params := models.CreateBackupLocationParams{ + Name: gofakeit.Name(), + Description: "", + } + params.S3Config = &models.S3LocationConfig{ + Endpoint: "https://awsS3.us-west-2.amazonaws.com/", + AccessKey: "access_key", + SecretKey: "secret_key", + BucketName: "example_bucket", + BucketRegion: "us-east-1", + } + loc, err := models.CreateBackupLocation(db.Querier, params) + require.NoError(t, err) + require.NotEmpty(t, loc.ID) + + artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ + Name: "test_artifact", + Vendor: "test_vendor", + LocationID: loc.ID, + ServiceID: "test_service", + Mode: models.PITR, + DataModel: models.LogicalDataModel, + Status: models.PendingBackupStatus, + }) + assert.NoError(t, err) + + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{ + FileList: []models.File{{Name: "dir1", IsDirectory: true}, {Name: "file1"}, {Name: "file2"}, {Name: "file3"}}, + }, + }) + require.NoError(t, err) + + restoreTo := time.Unix(123, 456) + + artifact, err = models.UpdateArtifact(db.Querier, artifact.ID, models.UpdateArtifactParams{ + Metadata: &models.Metadata{ + FileList: []models.File{{Name: "dir2", IsDirectory: true}, {Name: "file4"}, {Name: "file5"}, {Name: "file6"}}, + RestoreTo: &restoreTo, + BackupToolData: &models.BackupToolData{PbmMetadata: &models.PbmMetadata{Name: "backup tool data name"}}, + }, + }) + require.NoError(t, err) + + expected := []*backuppb.Metadata{ + { + FileList: []*backuppb.File{ + {Name: "dir1", 
IsDirectory: true}, + {Name: "file1"}, + {Name: "file2"}, + {Name: "file3"}, + }, + }, + { + FileList: []*backuppb.File{ + {Name: "dir2", IsDirectory: true}, + {Name: "file4"}, + {Name: "file5"}, + {Name: "file6"}, + }, + RestoreTo: &timestamppb.Timestamp{Seconds: 123, Nanos: 456}, + BackupToolMetadata: &backuppb.Metadata_PbmMetadata{PbmMetadata: &backuppb.PbmMetadata{Name: "backup tool data name"}}, + }, + } + + actual := artifactMetadataListToProto(artifact) + + assert.Equal(t, expected, actual) } diff --git a/managed/services/management/backup/backups_service.go b/managed/services/management/backup/backups_service.go index 8ab964f608..5242962155 100644 --- a/managed/services/management/backup/backups_service.go +++ b/managed/services/management/backup/backups_service.go @@ -18,6 +18,9 @@ package backup import ( "context" "fmt" + "path/filepath" + "regexp" + "strings" "time" "github.com/AlekSi/pointer" @@ -55,6 +58,11 @@ const ( maxRetryInterval = 8 * time.Hour ) +var ( + folderRe = regexp.MustCompile(`^[\.:\/\w-]*$`) // Dots, colons, slashes, letters, digits, underscores, dashes. + nameRe = regexp.MustCompile(`^[\.:\w-]*$`) // Dots, colons, letters, digits, underscores, dashes. +) + // NewBackupsService creates new backups API service. 
func NewBackupsService( db *reform.DB, @@ -81,6 +89,14 @@ func (s *BackupsService) StartBackup(ctx context.Context, req *backuppb.StartBac return nil, status.Errorf(codes.InvalidArgument, "Exceeded max retry interval %s.", maxRetryInterval) } + if err := isFolderSafe(req.Folder); err != nil { + return nil, err + } + + if err := isNameSafe(req.Name); err != nil { + return nil, err + } + dataModel, err := convertModelToBackupModel(req.DataModel) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "Invalid data model: %s", req.DataModel.String()) @@ -105,6 +121,7 @@ func (s *BackupsService) StartBackup(ctx context.Context, req *backuppb.StartBac Mode: models.Snapshot, Retries: req.Retries, RetryInterval: req.RetryInterval.AsDuration(), + Folder: req.Folder, }) if err != nil { return nil, convertBackupError(err) @@ -157,6 +174,14 @@ func (s *BackupsService) ScheduleBackup(ctx context.Context, req *backuppb.Sched return nil, status.Errorf(codes.InvalidArgument, "Exceeded max retry interval %s.", maxRetryInterval) } + if err := isFolderSafe(req.Folder); err != nil { + return nil, err + } + + if err := isNameSafe(req.Name); err != nil { + return nil, err + } + mode, err := convertBackupModeToModel(req.Mode) if err != nil { return nil, err @@ -189,6 +214,7 @@ func (s *BackupsService) ScheduleBackup(ctx context.Context, req *backuppb.Sched Retention: req.Retention, Retries: req.Retries, RetryInterval: req.RetryInterval.AsDuration(), + Folder: req.Folder, } var task scheduler.Task @@ -559,6 +585,7 @@ func convertTaskToScheduledBackup(task *models.ScheduledTask, scheduledBackup.Description = commonBackupData.Description scheduledBackup.Retention = commonBackupData.Retention scheduledBackup.Retries = commonBackupData.Retries + scheduledBackup.Folder = commonBackupData.Folder var err error if scheduledBackup.DataModel, err = convertDataModel(commonBackupData.DataModel); err != nil { @@ -652,7 +679,7 @@ func convertBackupError(backupErr error) error { Code: code, }) 
if err != nil { - return fmt.Errorf("failed to construct status error: %w, restore error: %s", err, backupErr) + return fmt.Errorf("failed to construct status error: %w, restore error: %w", err, backupErr) } return st.Err() @@ -703,7 +730,7 @@ func convertRestoreBackupError(restoreError error) error { Code: code, }) if err != nil { - return fmt.Errorf("failed to construct status error: %w, restore error: %s", err, restoreError) + return fmt.Errorf("failed to construct status error: %w, restore error: %w", err, restoreError) } return st.Err() @@ -725,6 +752,39 @@ func convertModelError(modelError error) error { } } +// isFolderSafe checks if specified path is safe against traversal attacks. +func isFolderSafe(path string) error { + if path == "" { + return nil + } + + canonical := filepath.Clean(path) + if canonical != path { + return status.Errorf(codes.InvalidArgument, "Specified folder in non-canonical format, canonical would be: %q.", canonical) + } + + if strings.HasPrefix(path, "/") { + return status.Error(codes.InvalidArgument, "Folder should be a relative path (shouldn't contain leading slashes).") + } + + if path == ".." || strings.HasPrefix(path, "../") { + return status.Error(codes.InvalidArgument, "Specified folder refers to a parent directory.") + } + + if !folderRe.Match([]byte(path)) { + return status.Error(codes.InvalidArgument, "Folder name can contain only dots, colons, slashes, letters, digits, underscores and dashes.") + } + + return nil +} + +func isNameSafe(name string) error { + if !nameRe.Match([]byte(name)) { + return status.Error(codes.InvalidArgument, "Backup name can contain only dots, colons, letters, digits, underscores and dashes.") + } + return nil +} + // Check interfaces. 
var ( _ backuppb.BackupsServer = (*BackupsService)(nil) diff --git a/managed/services/management/backup/backups_service_test.go b/managed/services/management/backup/backups_service_test.go index ddc144b83d..e1afdd22a0 100644 --- a/managed/services/management/backup/backups_service_test.go +++ b/managed/services/management/backup/backups_service_test.go @@ -169,6 +169,79 @@ func TestStartBackup(t *testing.T) { }) require.NoError(t, err) }) + + t.Run("check folder and artifact name", func(t *testing.T) { + ctx := context.Background() + backupService := &mockBackupService{} + backupSvc := NewBackupsService(db, backupService, nil, nil) + + tc := []struct { + TestName string + BackupName string + Folder string + ErrString string + }{ + { + TestName: "normal", + BackupName: ".normal_name:1-", + Folder: ".normal_folder:1-/tmp", + ErrString: "", + }, + { + TestName: "not allowed symbols in name", + BackupName: "normal/name", + Folder: "normal_folder", + ErrString: "rpc error: code = InvalidArgument desc = Backup name can contain only dots, colons, letters, digits, underscores and dashes.", + }, + { + TestName: "not allowed symbols in folder", + BackupName: "normal_name", + Folder: "$._folder:1-/tmp", + ErrString: "rpc error: code = InvalidArgument desc = Folder name can contain only dots, colons, slashes, letters, digits, underscores and dashes.", + }, + { + TestName: "folder refers to a parent directory", + BackupName: "normal_name", + Folder: "../../../some_folder", + ErrString: "rpc error: code = InvalidArgument desc = Specified folder refers to a parent directory.", + }, + { + TestName: "folder points to absolute path", + BackupName: "normal_name", + Folder: "/some_folder", + ErrString: "rpc error: code = InvalidArgument desc = Folder should be a relative path (shouldn't contain leading slashes).", + }, + { + TestName: "folder in non-canonical format", + BackupName: "normal_name", + Folder: "some_folder/../../../../root", + ErrString: "rpc error: code = InvalidArgument 
desc = Specified folder in non-canonical format, canonical would be: \"../../../root\".", + }, + } + + for _, test := range tc { + t.Run(test.TestName, func(t *testing.T) { + if test.ErrString == "" { + backupService.On("PerformBackup", mock.Anything, mock.Anything).Return("", nil).Once() + } + res, err := backupSvc.StartBackup(ctx, &backuppb.StartBackupRequest{ + Name: test.BackupName, + Folder: test.Folder, + ServiceId: *agent.ServiceID, + DataModel: backuppb.DataModel_LOGICAL, + }) + if test.ErrString != "" { + assert.Nil(t, res) + assert.Equal(t, test.ErrString, err.Error()) + return + } + assert.NoError(t, err) + assert.NotNil(t, res) + }) + } + + backupService.AssertExpectations(t) + }) }) } @@ -266,7 +339,7 @@ func TestScheduledBackups(t *testing.T) { LocationId: locationRes.ID, CronExpression: "1 * * * *", StartTime: timestamppb.New(time.Now()), - Name: t.Name(), + Name: "schedule_change", Description: t.Name(), Enabled: true, Mode: backuppb.BackupMode_SNAPSHOT, @@ -276,8 +349,8 @@ func TestScheduledBackups(t *testing.T) { } res, err := backupSvc.ScheduleBackup(ctx, req) - assert.NoError(t, err) - assert.NotEmpty(t, res.ScheduledBackupId) + require.NoError(t, err) + require.NotEmpty(t, res.ScheduledBackupId) task, err := models.FindScheduledTaskByID(db.Querier, res.ScheduledBackupId) require.NoError(t, err) diff --git a/managed/services/management/backup/deps.go b/managed/services/management/backup/deps.go index 4b43efd318..a7b660c6ba 100644 --- a/managed/services/management/backup/deps.go +++ b/managed/services/management/backup/deps.go @@ -28,7 +28,7 @@ import ( //go:generate ../../../../bin/mockery -name=backupService -case=snake -inpkg -testonly //go:generate ../../../../bin/mockery -name=scheduleService -case=snake -inpkg -testonly //go:generate ../../../../bin/mockery -name=removalService -case=snake -inpkg -testonly -//go:generate ../../../../bin/mockery -name=pitrTimerangeService -case=snake -inpkg -testonly +//go:generate ../../../../bin/mockery 
-name=pbmPITRService -case=snake -inpkg -testonly type awsS3 interface { GetBucketLocation(ctx context.Context, host string, accessKey, secretKey, name string) (string, error) @@ -56,11 +56,12 @@ type scheduleService interface { } type removalService interface { - DeleteArtifact(ctx context.Context, artifactID string, removeFiles bool) error + // DeleteArtifact deletes specified artifact along with files if specified. + DeleteArtifact(storage backup.Storage, artifactID string, removeFiles bool) error } -// pitrTimerangeService provides methods that help us inspect PITR artifacts -type pitrTimerangeService interface { - // ListPITRTimeranges returns the available PITR timeranges for the given artifact in the provided location - ListPITRTimeranges(ctx context.Context, artifactName string, location *models.BackupLocation) ([]backup.Timeline, error) +// pbmPITRService provides methods that help us inspect PITR artifacts +type pbmPITRService interface { + // ListPITRTimeranges returns the available PITR timeranges for the given artifact in the provided location. 
+ ListPITRTimeranges(ctx context.Context, locationClient backup.Storage, location *models.BackupLocation, artifact *models.Artifact) ([]backup.Timeline, error) } diff --git a/managed/services/management/backup/locations_service.go b/managed/services/management/backup/locations_service.go index b8c2aede00..9b64c835e6 100644 --- a/managed/services/management/backup/locations_service.go +++ b/managed/services/management/backup/locations_service.go @@ -271,7 +271,7 @@ func convertLocation(locationModel *models.BackupLocation) (*backuppb.Location, func (s *LocationsService) getBucketLocation(ctx context.Context, c *models.S3LocationConfig) (string, error) { bucketLocation, err := s.s3.GetBucketLocation(ctx, c.Endpoint, c.AccessKey, c.SecretKey, c.BucketName) if err != nil { - if minioErr, ok := err.(minio.ErrorResponse); ok { + if minioErr, ok := err.(minio.ErrorResponse); ok { //nolint:errorlint return "", status.Errorf(codes.InvalidArgument, "%s: %s.", minioErr.Code, minioErr.Message) } return "", status.Errorf(codes.Internal, "%s", err) @@ -283,7 +283,7 @@ func (s *LocationsService) getBucketLocation(ctx context.Context, c *models.S3Lo func (s *LocationsService) checkBucket(ctx context.Context, c *models.S3LocationConfig) error { exists, err := s.s3.BucketExists(ctx, c.Endpoint, c.AccessKey, c.SecretKey, c.BucketName) if err != nil { - if minioErr, ok := err.(minio.ErrorResponse); ok { + if minioErr, ok := err.(minio.ErrorResponse); ok { //nolint:errorlint return status.Errorf(codes.InvalidArgument, "%s: %s.", minioErr.Code, minioErr.Message) } diff --git a/managed/services/management/backup/locations_service_test.go b/managed/services/management/backup/locations_service_test.go index 17cf2c66fe..00bcb2108c 100644 --- a/managed/services/management/backup/locations_service_test.go +++ b/managed/services/management/backup/locations_service_test.go @@ -137,6 +137,7 @@ func TestListBackupLocations(t *testing.T) { checkLocation := func(id string, req 
*backuppb.AddLocationRequest) func() bool { return func() bool { for _, loc := range res.Locations { + //nolint:nestif if loc.LocationId == id { if loc.Name != req.Name || loc.Description != req.Description { return false @@ -322,7 +323,7 @@ func TestRemoveBackupLocation(t *testing.T) { _, err = svc.RemoveLocation(ctx, &backuppb.RemoveLocationRequest{ LocationId: "non-existing", }) - assert.EqualError(t, err, `rpc error: code = NotFound desc = Backup location with ID "non-existing" not found.`) + assert.ErrorIs(t, err, models.ErrNotFound) } func TestVerifyBackupLocationValidation(t *testing.T) { diff --git a/managed/services/management/backup/mock_pbm_pitr_service_test.go b/managed/services/management/backup/mock_pbm_pitr_service_test.go new file mode 100644 index 0000000000..93592d44b3 --- /dev/null +++ b/managed/services/management/backup/mock_pbm_pitr_service_test.go @@ -0,0 +1,40 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package backup + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + models "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/services/backup" +) + +// mockPbmPITRService is an autogenerated mock type for the pbmPITRService type +type mockPbmPITRService struct { + mock.Mock +} + +// ListPITRTimeranges provides a mock function with given fields: ctx, locationClient, location, artifact +func (_m *mockPbmPITRService) ListPITRTimeranges(ctx context.Context, locationClient backup.Storage, location *models.BackupLocation, artifact *models.Artifact) ([]backup.Timeline, error) { + ret := _m.Called(ctx, locationClient, location, artifact) + + var r0 []backup.Timeline + if rf, ok := ret.Get(0).(func(context.Context, backup.Storage, *models.BackupLocation, *models.Artifact) []backup.Timeline); ok { + r0 = rf(ctx, locationClient, location, artifact) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]backup.Timeline) + } + } + + var r1 error + if rf, ok := 
ret.Get(1).(func(context.Context, backup.Storage, *models.BackupLocation, *models.Artifact) error); ok { + r1 = rf(ctx, locationClient, location, artifact) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/managed/services/management/backup/mock_pitr_timerange_service_test.go b/managed/services/management/backup/mock_pitr_timerange_service_test.go deleted file mode 100644 index 43f7faa583..0000000000 --- a/managed/services/management/backup/mock_pitr_timerange_service_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package backup - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - - models "github.com/percona/pmm/managed/models" - "github.com/percona/pmm/managed/services/backup" -) - -// mockPitrTimerangeService is an autogenerated mock type for the pitrTimerangeService type -type mockPitrTimerangeService struct { - mock.Mock -} - -// ListPITRTimeranges provides a mock function with given fields: ctx, artifactName, location -func (_m *mockPitrTimerangeService) ListPITRTimeranges(ctx context.Context, artifactName string, location *models.BackupLocation) ([]backup.Timeline, error) { - ret := _m.Called(ctx, artifactName, location) - - var r0 []backup.Timeline - if rf, ok := ret.Get(0).(func(context.Context, string, *models.BackupLocation) []backup.Timeline); ok { - r0 = rf(ctx, artifactName, location) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]backup.Timeline) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, *models.BackupLocation) error); ok { - r1 = rf(ctx, artifactName, location) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/managed/services/management/backup/mock_removal_service_test.go b/managed/services/management/backup/mock_removal_service_test.go index 0290b96886..75fc429d12 100644 --- a/managed/services/management/backup/mock_removal_service_test.go +++ 
b/managed/services/management/backup/mock_removal_service_test.go @@ -3,9 +3,9 @@ package backup import ( - context "context" - mock "github.com/stretchr/testify/mock" + + "github.com/percona/pmm/managed/services/backup" ) // mockRemovalService is an autogenerated mock type for the removalService type @@ -13,13 +13,13 @@ type mockRemovalService struct { mock.Mock } -// DeleteArtifact provides a mock function with given fields: ctx, artifactID, removeFiles -func (_m *mockRemovalService) DeleteArtifact(ctx context.Context, artifactID string, removeFiles bool) error { - ret := _m.Called(ctx, artifactID, removeFiles) +// DeleteArtifact provides a mock function with given fields: storage, artifactID, removeFiles +func (_m *mockRemovalService) DeleteArtifact(storage backup.Storage, artifactID string, removeFiles bool) error { + ret := _m.Called(storage, artifactID, removeFiles) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok { - r0 = rf(ctx, artifactID, removeFiles) + if rf, ok := ret.Get(0).(func(backup.Storage, string, bool) error); ok { + r0 = rf(storage, artifactID, removeFiles) } else { r0 = ret.Error(0) } diff --git a/managed/services/management/dbaas/db_cluster_service.go b/managed/services/management/dbaas/db_cluster_service.go index ba8b45115a..d830887fbf 100644 --- a/managed/services/management/dbaas/db_cluster_service.go +++ b/managed/services/management/dbaas/db_cluster_service.go @@ -129,7 +129,7 @@ func (s DBClusterService) ListDBClusters(ctx context.Context, req *dbaasv1beta1. 
}, nil } -func (s DBClusterService) getClusterResource(instance dbaasv1.DBInstanceSpec) (diskSize int64, memory int64, cpu int, err error) { +func (s DBClusterService) getClusterResource(instance dbaasv1.DBInstanceSpec) (diskSize int64, memory int64, cpu int, err error) { //nolint:nonamedreturns disk, ok := (&instance.DiskSize).AsInt64() if ok { diskSize = disk diff --git a/managed/services/management/dbaas/dbaas_initializer.go b/managed/services/management/dbaas/dbaas_initializer.go index 961926adbb..01d7468a86 100644 --- a/managed/services/management/dbaas/dbaas_initializer.go +++ b/managed/services/management/dbaas/dbaas_initializer.go @@ -104,6 +104,7 @@ func (in *Initializer) Enable(ctx context.Context) error { // registerIncluster automatically adds k8s cluster to dbaas when PMM is running inside k8s cluster func (in *Initializer) registerInCluster(ctx context.Context) error { kubeConfig, err := in.dbaasClient.GetKubeConfig(ctx, &dbaascontrollerv1beta1.GetKubeconfigRequest{}) + //nolint:nestif if err == nil { // If err is not equal to nil, dont' register cluster and fail silently err := in.db.InTransaction(func(t *reform.TX) error { diff --git a/managed/services/management/dbaas/version_service_client.go b/managed/services/management/dbaas/version_service_client.go index 3f8e8f90e6..5cb30e4ee4 100644 --- a/managed/services/management/dbaas/version_service_client.go +++ b/managed/services/management/dbaas/version_service_client.go @@ -383,9 +383,13 @@ func (c *VersionServiceClient) GetVersionServiceURL() string { // It returns nil if update is not available or error occurred. It does not take PMM version into consideration. // We need to upgrade to current + 1 version for upgrade to be successful. So even if dbaas-controller does not support the // operator, we need to upgrade to it on our way to supported one. 
-func (c *VersionServiceClient) NextOperatorVersion(ctx context.Context, operatorType, installedVersion string) (nextOperatorVersion *goversion.Version, err error) { +func (c *VersionServiceClient) NextOperatorVersion( + ctx context.Context, + operatorType, + installedVersion string, +) (*goversion.Version, error) { if installedVersion == "" { - return + return nil, nil //nolint:nilnil } // Get all operator versions params := componentsParams{ @@ -393,10 +397,10 @@ func (c *VersionServiceClient) NextOperatorVersion(ctx context.Context, operator } matrix, err := c.Matrix(ctx, params) if err != nil { - return + return nil, err } if len(matrix.Versions) == 0 { - return + return nil, nil //nolint:nilnil } // Convert slice of version structs to slice of strings so it can be used in generic function next. @@ -409,7 +413,7 @@ func (c *VersionServiceClient) NextOperatorVersion(ctx context.Context, operator if installedVersion != "" { return next(versions, installedVersion) } - return + return nil, nil //nolint:nilnil } // next direct successor of given installed version, returns nil if there is none. 
diff --git a/managed/services/management/dbaas/version_service_client_test.go b/managed/services/management/dbaas/version_service_client_test.go index e0e25adfc2..b9d41e41db 100644 --- a/managed/services/management/dbaas/version_service_client_test.go +++ b/managed/services/management/dbaas/version_service_client_test.go @@ -78,6 +78,7 @@ func (f fakeLatestVersionServer) ServeHTTP(w http.ResponseWriter, r *http.Reques break } } + //nolint:nestif if certainVersionRequested { segments := strings.Split(r.URL.Path, "/") version := segments[len(segments)-2] diff --git a/managed/services/management/external.go b/managed/services/management/external.go index 7ca1be836d..933e02d990 100644 --- a/managed/services/management/external.go +++ b/managed/services/management/external.go @@ -85,7 +85,7 @@ func (e *ExternalService) AddExternal(ctx context.Context, req *managementpb.Add if err != nil { return err } - res.Service = invService.(*inventorypb.ExternalService) + res.Service = invService.(*inventorypb.ExternalService) //nolint:forcetypeassert if req.MetricsMode == managementpb.MetricsMode_AUTO { agentIDs, err := models.FindPMMAgentsRunningOnNode(tx.Querier, req.RunsOnNodeId) @@ -126,7 +126,7 @@ func (e *ExternalService) AddExternal(ctx context.Context, req *managementpb.Add if err != nil { return err } - res.ExternalExporter = agent.(*inventorypb.ExternalExporter) + res.ExternalExporter = agent.(*inventorypb.ExternalExporter) //nolint:forcetypeassert pmmAgentID = row.PMMAgentID return nil diff --git a/managed/services/management/grpc/actions_server.go b/managed/services/management/grpc/actions_server.go index 9d0e3a53f3..9beb7316be 100644 --- a/managed/services/management/grpc/actions_server.go +++ b/managed/services/management/grpc/actions_server.go @@ -146,7 +146,8 @@ func (s *actionsServer) StartMySQLExplainAction(ctx context.Context, req *manage return nil, status.Errorf(codes.FailedPrecondition, "Cannot find right agent") } - err = s.a.StartMySQLExplainAction(ctx, 
res.ID, res.PMMAgentID, req.ServiceId, dsn, req.Query, req.QueryId, req.Placeholders, agentpb.MysqlExplainOutputFormat_MYSQL_EXPLAIN_OUTPUT_FORMAT_DEFAULT, files, tdp, agents[0].TLSSkipVerify) + err = s.a.StartMySQLExplainAction(ctx, res.ID, res.PMMAgentID, req.ServiceId, dsn, req.Query, //nolint:staticcheck + req.QueryId, req.Placeholders, agentpb.MysqlExplainOutputFormat_MYSQL_EXPLAIN_OUTPUT_FORMAT_DEFAULT, files, tdp, agents[0].TLSSkipVerify) if err != nil { return nil, err } @@ -174,7 +175,8 @@ func (s *actionsServer) StartMySQLExplainJSONAction(ctx context.Context, req *ma return nil, status.Errorf(codes.FailedPrecondition, "Cannot find right agent") } - err = s.a.StartMySQLExplainAction(ctx, res.ID, res.PMMAgentID, req.ServiceId, dsn, req.Query, req.QueryId, req.Placeholders, agentpb.MysqlExplainOutputFormat_MYSQL_EXPLAIN_OUTPUT_FORMAT_JSON, files, tdp, agents[0].TLSSkipVerify) + err = s.a.StartMySQLExplainAction(ctx, res.ID, res.PMMAgentID, req.ServiceId, dsn, req.Query, //nolint:staticcheck + req.QueryId, req.Placeholders, agentpb.MysqlExplainOutputFormat_MYSQL_EXPLAIN_OUTPUT_FORMAT_JSON, files, tdp, agents[0].TLSSkipVerify) if err != nil { return nil, err } @@ -202,7 +204,8 @@ func (s *actionsServer) StartMySQLExplainTraditionalJSONAction(ctx context.Conte return nil, status.Errorf(codes.FailedPrecondition, "Cannot find right agent") } - err = s.a.StartMySQLExplainAction(ctx, res.ID, res.PMMAgentID, req.ServiceId, dsn, req.Query, req.QueryId, req.Placeholders, agentpb.MysqlExplainOutputFormat_MYSQL_EXPLAIN_OUTPUT_FORMAT_TRADITIONAL_JSON, files, tdp, agents[0].TLSSkipVerify) + err = s.a.StartMySQLExplainAction(ctx, res.ID, res.PMMAgentID, req.ServiceId, dsn, req.Query, //nolint:staticcheck + req.QueryId, req.Placeholders, agentpb.MysqlExplainOutputFormat_MYSQL_EXPLAIN_OUTPUT_FORMAT_TRADITIONAL_JSON, files, tdp, agents[0].TLSSkipVerify) if err != nil { return nil, err } diff --git a/managed/services/management/grpc/node_server.go 
b/managed/services/management/grpc/node_server.go index 9288d2a2b0..e9a549176e 100644 --- a/managed/services/management/grpc/node_server.go +++ b/managed/services/management/grpc/node_server.go @@ -35,6 +35,6 @@ func NewManagementNodeServer(s *management.NodeService) managementpb.NodeServer } // RegisterNode do registration of new Node. -func (s *nodeServer) RegisterNode(ctx context.Context, req *managementpb.RegisterNodeRequest) (res *managementpb.RegisterNodeResponse, err error) { +func (s *nodeServer) RegisterNode(ctx context.Context, req *managementpb.RegisterNodeRequest) (*managementpb.RegisterNodeResponse, error) { return s.svc.Register(ctx, req) } diff --git a/managed/services/management/haproxy.go b/managed/services/management/haproxy.go index fd32e56cf7..a3763f9749 100644 --- a/managed/services/management/haproxy.go +++ b/managed/services/management/haproxy.go @@ -76,7 +76,7 @@ func (e HAProxyService) AddHAProxy(ctx context.Context, req *managementpb.AddHAP if err != nil { return err } - res.Service = invService.(*inventorypb.HAProxyService) + res.Service = invService.(*inventorypb.HAProxyService) //nolint:forcetypeassert if req.MetricsMode == managementpb.MetricsMode_AUTO { agentIDs, err := models.FindPMMAgentsRunningOnNode(tx.Querier, req.NodeId) @@ -117,7 +117,7 @@ func (e HAProxyService) AddHAProxy(ctx context.Context, req *managementpb.AddHAP if err != nil { return err } - res.ExternalExporter = agent.(*inventorypb.ExternalExporter) + res.ExternalExporter = agent.(*inventorypb.ExternalExporter) //nolint:forcetypeassert pmmAgentID = row.PMMAgentID return nil diff --git a/managed/services/management/mongodb.go b/managed/services/management/mongodb.go index ca17485ae2..de7d063415 100644 --- a/managed/services/management/mongodb.go +++ b/managed/services/management/mongodb.go @@ -73,7 +73,7 @@ func (s *MongoDBService) Add(ctx context.Context, req *managementpb.AddMongoDBRe if err != nil { return err } - res.Service = 
invService.(*inventorypb.MongoDBService) + res.Service = invService.(*inventorypb.MongoDBService) //nolint:forcetypeassert mongoDBOptions := models.MongoDBOptionsFromRequest(req) @@ -109,7 +109,7 @@ func (s *MongoDBService) Add(ctx context.Context, req *managementpb.AddMongoDBRe if err != nil { return err } - res.MongodbExporter = agent.(*inventorypb.MongoDBExporter) + res.MongodbExporter = agent.(*inventorypb.MongoDBExporter) //nolint:forcetypeassert if req.QanMongodbProfiler { row, err = models.CreateAgent(tx.Querier, models.QANMongoDBProfilerAgentType, &models.CreateAgentParams{ @@ -132,7 +132,7 @@ func (s *MongoDBService) Add(ctx context.Context, req *managementpb.AddMongoDBRe if err != nil { return err } - res.QanMongodbProfiler = agent.(*inventorypb.QANMongoDBProfilerAgent) + res.QanMongodbProfiler = agent.(*inventorypb.QANMongoDBProfilerAgent) //nolint:forcetypeassert } return nil diff --git a/managed/services/management/mysql.go b/managed/services/management/mysql.go index 94fe518f19..8eeab76130 100644 --- a/managed/services/management/mysql.go +++ b/managed/services/management/mysql.go @@ -97,7 +97,7 @@ func (s *MySQLService) Add(ctx context.Context, req *managementpb.AddMySQLReques if err != nil { return err } - res.Service = invService.(*inventorypb.MySQLService) + res.Service = invService.(*inventorypb.MySQLService) //nolint:forcetypeassert req.MetricsMode, err = supportedMetricsMode(tx.Querier, req.MetricsMode, req.PmmAgentId) if err != nil { @@ -133,7 +133,7 @@ func (s *MySQLService) Add(ctx context.Context, req *managementpb.AddMySQLReques if err != nil { return err } - res.MysqldExporter = agent.(*inventorypb.MySQLdExporter) + res.MysqldExporter = agent.(*inventorypb.MySQLdExporter) //nolint:forcetypeassert if req.QanMysqlPerfschema { row, err = models.CreateAgent(tx.Querier, models.QANMySQLPerfSchemaAgentType, &models.CreateAgentParams{ @@ -156,7 +156,7 @@ func (s *MySQLService) Add(ctx context.Context, req *managementpb.AddMySQLReques if err != 
nil { return err } - res.QanMysqlPerfschema = agent.(*inventorypb.QANMySQLPerfSchemaAgent) + res.QanMysqlPerfschema = agent.(*inventorypb.QANMySQLPerfSchemaAgent) //nolint:forcetypeassert } if req.QanMysqlSlowlog { @@ -181,7 +181,7 @@ func (s *MySQLService) Add(ctx context.Context, req *managementpb.AddMySQLReques if err != nil { return err } - res.QanMysqlSlowlog = agent.(*inventorypb.QANMySQLSlowlogAgent) + res.QanMysqlSlowlog = agent.(*inventorypb.QANMySQLSlowlogAgent) //nolint:forcetypeassert } return nil diff --git a/managed/services/management/node.go b/managed/services/management/node.go index b975e6f4e0..edf56ce48e 100644 --- a/managed/services/management/node.go +++ b/managed/services/management/node.go @@ -126,7 +126,7 @@ func (s *NodeService) Register(ctx context.Context, req *managementpb.RegisterNo if err != nil { return err } - res.PmmAgent = a.(*inventorypb.PMMAgent) + res.PmmAgent = a.(*inventorypb.PMMAgent) //nolint:forcetypeassert _, err = models. CreateNodeExporter(tx.Querier, pmmAgent.AgentID, nil, isPushMode(req.MetricsMode), req.DisableCollectors, pointer.ToStringOrNil(req.AgentPassword), "") diff --git a/managed/services/management/node_mgmt.go b/managed/services/management/node_mgmt.go new file mode 100644 index 0000000000..694706b116 --- /dev/null +++ b/managed/services/management/node_mgmt.go @@ -0,0 +1,248 @@ +// Copyright (C) 2017 Percona LLC +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package management + +import ( + "context" + "fmt" + "time" + + "github.com/AlekSi/pointer" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/types/known/timestamppb" + "gopkg.in/reform.v1" + + nodev1beta1 "github.com/percona/pmm/api/managementpb/node" + "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/services" +) + +// MgmtNodeService represents a management API service for working with nodes. +type MgmtNodeService struct { + db *reform.DB + r agentsRegistry + vmClient victoriaMetricsClient + + nodev1beta1.UnimplementedMgmtNodeServer +} + +// NewMgmtNodeService creates MgmtNodeService instance. +func NewMgmtNodeService(db *reform.DB, r agentsRegistry, vmClient victoriaMetricsClient) *MgmtNodeService { + return &MgmtNodeService{ + db: db, + r: r, + vmClient: vmClient, + } +} + +const upQuery = `up{job=~".*_hr$"}` + +// ListNodes returns a filtered list of Nodes. 
+func (s *MgmtNodeService) ListNodes(ctx context.Context, req *nodev1beta1.ListNodeRequest) (*nodev1beta1.ListNodeResponse, error) { + filters := models.NodeFilters{ + NodeType: services.ProtoToModelNodeType(req.NodeType), + } + + var ( + nodes []*models.Node + agents []*models.Agent + services []*models.Service + ) + + errTX := s.db.InTransactionContext(ctx, nil, func(tx *reform.TX) error { + var err error + + nodes, err = models.FindNodes(s.db.Querier, filters) + if err != nil { + return err + } + + agents, err = models.FindAgents(s.db.Querier, models.AgentFilters{}) + if err != nil { + return err + } + + services, err = models.FindServices(s.db.Querier, models.ServiceFilters{}) + if err != nil { + return err + } + + return nil + }) + + if errTX != nil { + return nil, errTX + } + + convertAgentToProto := func(agent *models.Agent) *nodev1beta1.UniversalNode_Agent { + return &nodev1beta1.UniversalNode_Agent{ + AgentId: agent.AgentID, + AgentType: string(agent.AgentType), + Status: agent.Status, + IsConnected: s.r.IsConnected(agent.AgentID), + } + } + + aMap := make(map[string][]*nodev1beta1.UniversalNode_Agent, len(nodes)) + for _, a := range agents { + if a.NodeID != nil || a.RunsOnNodeID != nil { + var nodeID string + if a.NodeID != nil { + nodeID = pointer.GetString(a.NodeID) + } else { + nodeID = pointer.GetString(a.RunsOnNodeID) + } + aMap[nodeID] = append(aMap[nodeID], convertAgentToProto(a)) + } + } + + sMap := make(map[string][]*nodev1beta1.UniversalNode_Service, len(services)) + for _, s := range services { + sMap[s.NodeID] = append(sMap[s.NodeID], &nodev1beta1.UniversalNode_Service{ + ServiceId: s.ServiceID, + ServiceType: string(s.ServiceType), + ServiceName: s.ServiceName, + }) + } + + result, _, err := s.vmClient.Query(ctx, upQuery, time.Now()) + if err != nil { + return nil, errors.Wrap(err, "failed to execute an instant VM query") + } + + metrics := make(map[string]int, len(result.(model.Vector))) //nolint:forcetypeassert + for _, v := range 
result.(model.Vector) { //nolint:forcetypeassert + nodeID := string(v.Metric[model.LabelName("node_id")]) + // Sometimes we may see several metrics for the same node, so we just take the first one. + if _, ok := metrics[nodeID]; !ok { + metrics[nodeID] = int(v.Value) + } + } + + res := make([]*nodev1beta1.UniversalNode, len(nodes)) + for i, node := range nodes { + labels, err := node.GetCustomLabels() + if err != nil { + return nil, err + } + + uNode := &nodev1beta1.UniversalNode{ + Address: node.Address, + CustomLabels: labels, + NodeId: node.NodeID, + NodeName: node.NodeName, + NodeType: string(node.NodeType), + Az: node.AZ, + CreatedAt: timestamppb.New(node.CreatedAt), + ContainerId: pointer.GetString(node.ContainerID), + ContainerName: pointer.GetString(node.ContainerName), + Distro: node.Distro, + MachineId: pointer.GetString(node.MachineID), + NodeModel: node.NodeModel, + Region: pointer.GetString(node.Region), + UpdatedAt: timestamppb.New(node.UpdatedAt), + } + + if metric, ok := metrics[node.NodeID]; ok { + switch metric { + // We assume there can only be metric values of either 1(UP) or 0(DOWN). + case 0: + uNode.Status = nodev1beta1.UniversalNode_DOWN + case 1: + uNode.Status = nodev1beta1.UniversalNode_UP + } + } else { + uNode.Status = nodev1beta1.UniversalNode_UNKNOWN + } + + if uAgents, ok := aMap[node.NodeID]; ok { + uNode.Agents = uAgents + } + + if uServices, ok := sMap[node.NodeID]; ok { + uNode.Services = uServices + } + + res[i] = uNode + } + + return &nodev1beta1.ListNodeResponse{ + Nodes: res, + }, nil +} + +const nodeUpQuery = `up{job=~".*_hr$",node_id=%q}` + +// GetNode returns a single Node by ID. 
+func (s *MgmtNodeService) GetNode(ctx context.Context, req *nodev1beta1.GetNodeRequest) (*nodev1beta1.GetNodeResponse, error) { + node, err := models.FindNodeByID(s.db.Querier, req.NodeId) + if err != nil { + return nil, err + } + + result, _, err := s.vmClient.Query(ctx, fmt.Sprintf(nodeUpQuery, req.NodeId), time.Now()) + if err != nil { + return nil, errors.Wrap(err, "failed to execute an instant VM query") + } + + metrics := make(map[string]int, len(result.(model.Vector))) //nolint:forcetypeassert + for _, v := range result.(model.Vector) { //nolint:forcetypeassert + nodeID := string(v.Metric[model.LabelName("node_id")]) + // Sometimes we may see several metrics for the same node, so we just take the first one. + if _, ok := metrics[nodeID]; !ok { + metrics[nodeID] = int(v.Value) + } + } + + labels, err := node.GetCustomLabels() + if err != nil { + return nil, err + } + + uNode := &nodev1beta1.UniversalNode{ + Address: node.Address, + Az: node.AZ, + CreatedAt: timestamppb.New(node.CreatedAt), + ContainerId: pointer.GetString(node.ContainerID), + ContainerName: pointer.GetString(node.ContainerName), + CustomLabels: labels, + Distro: node.Distro, + MachineId: pointer.GetString(node.MachineID), + NodeId: node.NodeID, + NodeName: node.NodeName, + NodeType: string(node.NodeType), + NodeModel: node.NodeModel, + Region: pointer.GetString(node.Region), + UpdatedAt: timestamppb.New(node.UpdatedAt), + } + + if metric, ok := metrics[node.NodeID]; ok { + switch metric { + // We assume there can only be metric values of either 1(UP) or 0(DOWN). 
+ case 0: + uNode.Status = nodev1beta1.UniversalNode_DOWN + case 1: + uNode.Status = nodev1beta1.UniversalNode_UP + } + } else { + uNode.Status = nodev1beta1.UniversalNode_UNKNOWN + } + + return &nodev1beta1.GetNodeResponse{ + Node: uNode, + }, nil +} diff --git a/managed/services/management/node_mgmt_test.go b/managed/services/management/node_mgmt_test.go new file mode 100644 index 0000000000..60297bd1a3 --- /dev/null +++ b/managed/services/management/node_mgmt_test.go @@ -0,0 +1,342 @@ +// Copyright (C) 2017 Percona LLC +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package management + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + "gopkg.in/reform.v1" + "gopkg.in/reform.v1/dialects/postgresql" + + "github.com/percona/pmm/api/inventorypb" + nodev1beta1 "github.com/percona/pmm/api/managementpb/node" + "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/utils/logger" + "github.com/percona/pmm/managed/utils/testdb" + "github.com/percona/pmm/managed/utils/tests" +) + +func TestMgmtNodeService(t *testing.T) { + t.Run("ListNodes", func(t *testing.T) { + now = models.Now() + + setup := func(t *testing.T) (ctx context.Context, s *MgmtNodeService, teardown func(t *testing.T)) { + t.Helper() + + origNowF := models.Now + models.Now = func() time.Time { + return now + } + + ctx = logger.Set(context.Background(), t.Name()) + uuid.SetRand(&tests.IDReader{}) + + sqlDB := testdb.Open(t, models.SetupFixtures, nil) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + + ar := &mockAgentsRegistry{} + ar.Test(t) + + vmClient := &mockVictoriaMetricsClient{} + vmClient.Test(t) + + s = NewMgmtNodeService(db, ar, vmClient) + + teardown = func(t *testing.T) { + models.Now = origNowF + uuid.SetRand(nil) + + require.NoError(t, sqlDB.Close()) + ar.AssertExpectations(t) + } + + return + } + + const ( + nodeExporterID = "/agent_id/00000000-0000-4000-8000-000000000001" + postgresqlServiceID = "/service_id/00000000-0000-4000-8000-000000000002" + ) + + t.Run("should output an unfiltered list of all nodes", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer teardown(t) + + metric := model.Vector{ + &model.Sample{ + Metric: model.Metric{ + "__name__": "up", + "node_id": "pmm-server", + }, + 
Timestamp: 1, + Value: 1, + }, + } + + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(metric, nil, nil).Times(2) + s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() + s.r.(*mockAgentsRegistry).On("IsConnected", nodeExporterID).Return(true).Once() + res, err := s.ListNodes(ctx, &nodev1beta1.ListNodeRequest{}) + require.NoError(t, err) + + expected := &nodev1beta1.ListNodeResponse{ + Nodes: []*nodev1beta1.UniversalNode{ + { + NodeId: "pmm-server", + NodeType: "generic", + NodeName: "pmm-server", + MachineId: "", + Distro: "", + NodeModel: "", + ContainerId: "", + ContainerName: "", + Address: "127.0.0.1", + Region: "", + Az: "", + CustomLabels: nil, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + Agents: []*nodev1beta1.UniversalNode_Agent{ + { + AgentId: nodeExporterID, + AgentType: "node_exporter", + Status: "UNKNOWN", + IsConnected: true, + }, + { + AgentId: models.PMMServerAgentID, + AgentType: "pmm-agent", + Status: "", + IsConnected: true, + }, + }, + Services: []*nodev1beta1.UniversalNode_Service{ + { + ServiceId: postgresqlServiceID, + ServiceType: "postgresql", + ServiceName: "pmm-server-postgresql", + }, + }, + Status: nodev1beta1.UniversalNode_UP, + }, + }, + } + + assert.Equal(t, expected, res) + }) + + t.Run("should output an empty list of nodes when filter condition is not satisfied", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer teardown(t) + + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(2) + s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() + s.r.(*mockAgentsRegistry).On("IsConnected", nodeExporterID).Return(true).Once() + + res, err := s.ListNodes(ctx, &nodev1beta1.ListNodeRequest{ + NodeType: inventorypb.NodeType_REMOTE_NODE, + }) + + require.NoError(t, err) + assert.Empty(t, res.Nodes) + }) + + t.Run("should 
output a list of nodes when filter condition is satisfied", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer teardown(t) + + metric := model.Vector{ + &model.Sample{ + Metric: model.Metric{ + "__name__": "up", + "node_id": "pmm-server", + }, + Timestamp: 1, + Value: 1, + }, + } + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(metric, nil, nil).Times(2) + s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() + s.r.(*mockAgentsRegistry).On("IsConnected", nodeExporterID).Return(true).Once() + + res, err := s.ListNodes(ctx, &nodev1beta1.ListNodeRequest{ + NodeType: inventorypb.NodeType_GENERIC_NODE, + }) + require.NoError(t, err) + + expected := &nodev1beta1.ListNodeResponse{ + Nodes: []*nodev1beta1.UniversalNode{ + { + NodeId: "pmm-server", + NodeType: "generic", + NodeName: "pmm-server", + MachineId: "", + Distro: "", + NodeModel: "", + ContainerId: "", + ContainerName: "", + Address: "127.0.0.1", + Region: "", + Az: "", + CustomLabels: nil, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + Agents: []*nodev1beta1.UniversalNode_Agent{ + { + AgentId: nodeExporterID, + AgentType: "node_exporter", + Status: "UNKNOWN", + IsConnected: true, + }, + { + AgentId: models.PMMServerAgentID, + AgentType: "pmm-agent", + Status: "", + IsConnected: true, + }, + }, + Services: []*nodev1beta1.UniversalNode_Service{ + { + ServiceId: postgresqlServiceID, + ServiceType: "postgresql", + ServiceName: "pmm-server-postgresql", + }, + }, + Status: nodev1beta1.UniversalNode_UP, + }, + }, + } + + assert.Equal(t, expected, res) + }) + }) + + t.Run("GetNode", func(t *testing.T) { + now := models.Now() + + setup := func(t *testing.T) (ctx context.Context, s *MgmtNodeService, teardown func(t *testing.T)) { + t.Helper() + + origNowF := models.Now + models.Now = func() time.Time { + return now + } + ctx = logger.Set(context.Background(), t.Name()) + uuid.SetRand(&tests.IDReader{}) + + 
sqlDB := testdb.Open(t, models.SetupFixtures, nil) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + + ar := &mockAgentsRegistry{} + ar.Test(t) + + vmClient := &mockVictoriaMetricsClient{} + vmClient.Test(t) + + s = NewMgmtNodeService(db, ar, vmClient) + + teardown = func(t *testing.T) { + models.Now = origNowF + uuid.SetRand(nil) + + require.NoError(t, sqlDB.Close()) + ar.AssertExpectations(t) + } + + return + } + + t.Run("should query the node by its id", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer teardown(t) + + metric := model.Vector{ + &model.Sample{ + Metric: model.Metric{ + "__name__": "up", + "node_id": "pmm-server", + }, + Timestamp: 1, + Value: 1, + }, + } + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(metric, nil, nil).Times(2) + + expected := &nodev1beta1.GetNodeResponse{ + Node: &nodev1beta1.UniversalNode{ + NodeId: "pmm-server", + NodeType: "generic", + NodeName: "pmm-server", + MachineId: "", + Distro: "", + NodeModel: "", + ContainerId: "", + ContainerName: "", + Address: "127.0.0.1", + Region: "", + Az: "", + CustomLabels: nil, + CreatedAt: timestamppb.New(now), + UpdatedAt: timestamppb.New(now), + Status: nodev1beta1.UniversalNode_UP, + }, + } + + node, err := s.GetNode(ctx, &nodev1beta1.GetNodeRequest{ + NodeId: models.PMMServerNodeID, + }) + + require.NoError(t, err) + assert.Equal(t, expected, node) + }) + + t.Run("should return an error if such node_id doesn't exist", func(t *testing.T) { + const nodeID = "00000000-0000-4000-8000-000000000000" + ctx, s, teardown := setup(t) + defer teardown(t) + + node, err := s.GetNode(ctx, &nodev1beta1.GetNodeRequest{ + NodeId: nodeID, + }) + + assert.Nil(t, node) + tests.AssertGRPCError(t, status.New(codes.NotFound, fmt.Sprintf("Node with ID %q not found.", nodeID)), err) + }) + + t.Run("should return an error if the node_id parameter is empty", func(t *testing.T) { + ctx, s, teardown := setup(t) + defer 
teardown(t) + + node, err := s.GetNode(ctx, &nodev1beta1.GetNodeRequest{ + NodeId: "", + }) + + assert.Nil(t, node) + tests.AssertGRPCError(t, status.New(codes.InvalidArgument, "Empty Node ID."), err) + }) + }) +} diff --git a/managed/services/management/node_test.go b/managed/services/management/node_test.go index afad51a304..aa6dd6dba5 100644 --- a/managed/services/management/node_test.go +++ b/managed/services/management/node_test.go @@ -37,29 +37,29 @@ import ( ) func TestNodeService(t *testing.T) { - setup := func(t *testing.T) (ctx context.Context, s *NodeService, teardown func(t *testing.T)) { - t.Helper() + t.Run("Register", func(t *testing.T) { + setup := func(t *testing.T) (ctx context.Context, s *NodeService, teardown func(t *testing.T)) { + t.Helper() - ctx = logger.Set(context.Background(), t.Name()) - uuid.SetRand(&tests.IDReader{}) + ctx = logger.Set(context.Background(), t.Name()) + uuid.SetRand(&tests.IDReader{}) - sqlDB := testdb.Open(t, models.SetupFixtures, nil) - db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + sqlDB := testdb.Open(t, models.SetupFixtures, nil) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) - teardown = func(t *testing.T) { - uuid.SetRand(nil) + teardown = func(t *testing.T) { + uuid.SetRand(nil) - require.NoError(t, sqlDB.Close()) - } - var apiKeyProvider mockApiKeyProvider - apiKeyProvider.Test(t) - apiKeyProvider.On("CreateAdminAPIKey", ctx, mock.AnythingOfType("string")).Return(int64(0), "test-token", nil) - s = NewNodeService(db, &apiKeyProvider) + require.NoError(t, sqlDB.Close()) + } + var apiKeyProvider mockApiKeyProvider + apiKeyProvider.Test(t) + apiKeyProvider.On("CreateAdminAPIKey", ctx, mock.AnythingOfType("string")).Return(int64(0), "test-token", nil) + s = NewNodeService(db, &apiKeyProvider) - return - } + return + } - t.Run("Register", func(t *testing.T) { t.Run("New", func(t *testing.T) { ctx, s, teardown := setup(t) defer teardown(t) diff --git 
a/managed/services/management/postgresql.go b/managed/services/management/postgresql.go index 30bd83af82..0470970e2d 100644 --- a/managed/services/management/postgresql.go +++ b/managed/services/management/postgresql.go @@ -73,7 +73,7 @@ func (s *PostgreSQLService) Add(ctx context.Context, req *managementpb.AddPostgr if err != nil { return err } - res.Service = invService.(*inventorypb.PostgreSQLService) + res.Service = invService.(*inventorypb.PostgreSQLService) //nolint:forcetypeassert req.MetricsMode, err = supportedMetricsMode(tx.Querier, req.MetricsMode, req.PmmAgentId) if err != nil { @@ -107,7 +107,7 @@ func (s *PostgreSQLService) Add(ctx context.Context, req *managementpb.AddPostgr if err != nil { return err } - res.PostgresExporter = agent.(*inventorypb.PostgresExporter) + res.PostgresExporter = agent.(*inventorypb.PostgresExporter) //nolint:forcetypeassert if req.QanPostgresqlPgstatementsAgent { row, err = models.CreateAgent(tx.Querier, models.QANPostgreSQLPgStatementsAgentType, &models.CreateAgentParams{ @@ -129,7 +129,7 @@ func (s *PostgreSQLService) Add(ctx context.Context, req *managementpb.AddPostgr if err != nil { return err } - res.QanPostgresqlPgstatementsAgent = agent.(*inventorypb.QANPostgreSQLPgStatementsAgent) + res.QanPostgresqlPgstatementsAgent = agent.(*inventorypb.QANPostgreSQLPgStatementsAgent) //nolint:forcetypeassert } if req.QanPostgresqlPgstatmonitorAgent { @@ -153,7 +153,7 @@ func (s *PostgreSQLService) Add(ctx context.Context, req *managementpb.AddPostgr if err != nil { return err } - res.QanPostgresqlPgstatmonitorAgent = agent.(*inventorypb.QANPostgreSQLPgStatMonitorAgent) + res.QanPostgresqlPgstatmonitorAgent = agent.(*inventorypb.QANPostgreSQLPgStatMonitorAgent) //nolint:forcetypeassert } return nil diff --git a/managed/services/management/proxysql.go b/managed/services/management/proxysql.go index 22ffa8aa61..88b883bfff 100644 --- a/managed/services/management/proxysql.go +++ b/managed/services/management/proxysql.go @@ -71,7 
+71,7 @@ func (s *ProxySQLService) Add(ctx context.Context, req *managementpb.AddProxySQL if err != nil { return err } - res.Service = invService.(*inventorypb.ProxySQLService) + res.Service = invService.(*inventorypb.ProxySQLService) //nolint:forcetypeassert req.MetricsMode, err = supportedMetricsMode(tx.Querier, req.MetricsMode, req.PmmAgentId) if err != nil { @@ -104,7 +104,7 @@ func (s *ProxySQLService) Add(ctx context.Context, req *managementpb.AddProxySQL if err != nil { return err } - res.ProxysqlExporter = agent.(*inventorypb.ProxySQLExporter) + res.ProxysqlExporter = agent.(*inventorypb.ProxySQLExporter) //nolint:forcetypeassert return nil }); e != nil { diff --git a/managed/services/management/rds.go b/managed/services/management/rds.go index 9d0b7213eb..849be3070b 100644 --- a/managed/services/management/rds.go +++ b/managed/services/management/rds.go @@ -226,7 +226,7 @@ func (s *RDSService) DiscoverRDS(ctx context.Context, req *managementpb.Discover // return better gRPC errors in typical cases err = wg.Wait() - if e, ok := errors.Cause(err).(awserr.Error); ok { + if e, ok := errors.Cause(err).(awserr.Error); ok { //nolint:errorlint switch { case e.Code() == "InvalidClientTokenId": return res, status.Error(codes.InvalidArgument, e.Message()) @@ -277,7 +277,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.Node = invNode.(*inventorypb.RemoteRDSNode) + res.Node = invNode.(*inventorypb.RemoteRDSNode) //nolint:forcetypeassert // add RDSExporter Agent if req.RdsExporter { @@ -296,7 +296,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.RdsExporter = invRDSExporter.(*inventorypb.RDSExporter) + res.RdsExporter = invRDSExporter.(*inventorypb.RDSExporter) //nolint:forcetypeassert } switch req.Engine { @@ -319,7 +319,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return 
err } - res.Mysql = invService.(*inventorypb.MySQLService) + res.Mysql = invService.(*inventorypb.MySQLService) //nolint:forcetypeassert _, err = supportedMetricsMode(tx.Querier, req.MetricsMode, models.PMMServerAgentID) if err != nil { @@ -343,7 +343,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.MysqldExporter = invMySQLdExporter.(*inventorypb.MySQLdExporter) + res.MysqldExporter = invMySQLdExporter.(*inventorypb.MySQLdExporter) //nolint:forcetypeassert if !req.SkipConnectionCheck { if err = s.cc.CheckConnectionToService(ctx, tx.Querier, service, mysqldExporter); err != nil { @@ -371,7 +371,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.QanMysqlPerfschema = invQANAgent.(*inventorypb.QANMySQLPerfSchemaAgent) + res.QanMysqlPerfschema = invQANAgent.(*inventorypb.QANMySQLPerfSchemaAgent) //nolint:forcetypeassert } return nil @@ -395,7 +395,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.Postgresql = invService.(*inventorypb.PostgreSQLService) + res.Postgresql = invService.(*inventorypb.PostgreSQLService) //nolint:forcetypeassert _, err = supportedMetricsMode(tx.Querier, req.MetricsMode, models.PMMServerAgentID) if err != nil { @@ -419,7 +419,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.PostgresqlExporter = invPostgresExporter.(*inventorypb.PostgresExporter) + res.PostgresqlExporter = invPostgresExporter.(*inventorypb.PostgresExporter) //nolint:forcetypeassert if !req.SkipConnectionCheck { if err = s.cc.CheckConnectionToService(ctx, tx.Querier, service, postgresExporter); err != nil { @@ -445,7 +445,7 @@ func (s *RDSService) AddRDS(ctx context.Context, req *managementpb.AddRDSRequest if err != nil { return err } - res.QanPostgresqlPgstatements = 
invQANAgent.(*inventorypb.QANPostgreSQLPgStatementsAgent) + res.QanPostgresqlPgstatements = invQANAgent.(*inventorypb.QANPostgreSQLPgStatementsAgent) //nolint:forcetypeassert } return nil diff --git a/managed/services/management/service.go b/managed/services/management/service.go index a610c12ea3..4d9a49f68a 100644 --- a/managed/services/management/service.go +++ b/managed/services/management/service.go @@ -17,52 +17,18 @@ package management import ( "context" - "time" "github.com/AlekSi/pointer" - "github.com/pkg/errors" - "github.com/prometheus/common/model" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/reform.v1" "github.com/percona/pmm/api/inventorypb" "github.com/percona/pmm/api/managementpb" - agentv1beta1 "github.com/percona/pmm/api/managementpb/agent" - servicev1beta1 "github.com/percona/pmm/api/managementpb/service" "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/services" ) -var serviceTypes = map[inventorypb.ServiceType]models.ServiceType{ - inventorypb.ServiceType_MYSQL_SERVICE: models.MySQLServiceType, - inventorypb.ServiceType_MONGODB_SERVICE: models.MongoDBServiceType, - inventorypb.ServiceType_POSTGRESQL_SERVICE: models.PostgreSQLServiceType, - inventorypb.ServiceType_PROXYSQL_SERVICE: models.ProxySQLServiceType, - inventorypb.ServiceType_HAPROXY_SERVICE: models.HAProxyServiceType, - inventorypb.ServiceType_EXTERNAL_SERVICE: models.ExternalServiceType, -} - -// A map to check if the service is supported. -// NOTE: known external services appear to match the vendor names, -// (e.g. "mysql", "mongodb", "postgresql", "proxysql", "haproxy"), -// which is why ServiceType_EXTERNAL_SERVICE is not part of this map. 
-var supportedServices = map[string]inventorypb.ServiceType{ - string(models.MySQLServiceType): inventorypb.ServiceType_MYSQL_SERVICE, - string(models.MongoDBServiceType): inventorypb.ServiceType_MONGODB_SERVICE, - string(models.PostgreSQLServiceType): inventorypb.ServiceType_POSTGRESQL_SERVICE, - string(models.ProxySQLServiceType): inventorypb.ServiceType_PROXYSQL_SERVICE, - string(models.HAProxyServiceType): inventorypb.ServiceType_HAPROXY_SERVICE, -} - -func convertServiceType(serviceType inventorypb.ServiceType) *models.ServiceType { - if serviceType == inventorypb.ServiceType_SERVICE_TYPE_INVALID { - return nil - } - result := serviceTypes[serviceType] - return &result -} - // ServiceService represents service for working with services. type ServiceService struct { db *reform.DB @@ -73,21 +39,6 @@ type ServiceService struct { managementpb.UnimplementedServiceServer } -type MgmtServiceService struct { - db *reform.DB - r agentsRegistry - state agentsStateUpdater - vmdb prometheusService - vmClient victoriaMetricsClient - - servicev1beta1.UnimplementedMgmtServiceServer -} - -type statusMetrics struct { - status int - serviceType string -} - // NewServiceService creates ServiceService instance. func NewServiceService(db *reform.DB, r agentsRegistry, state agentsStateUpdater, vmdb prometheusService) *ServiceService { return &ServiceService{ @@ -98,17 +49,6 @@ func NewServiceService(db *reform.DB, r agentsRegistry, state agentsStateUpdater } } -// NewMgmtServiceService creates MgmtServiceService instance. -func NewMgmtServiceService(db *reform.DB, r agentsRegistry, state agentsStateUpdater, vmdb prometheusService, vmClient victoriaMetricsClient) *MgmtServiceService { - return &MgmtServiceService{ - db: db, - r: r, - state: state, - vmdb: vmdb, - vmClient: vmClient, - } -} - // RemoveService removes Service with Agents. 
func (s *ServiceService) RemoveService(ctx context.Context, req *managementpb.RemoveServiceRequest) (*managementpb.RemoveServiceResponse, error) { err := s.validateRequest(req) @@ -118,7 +58,7 @@ func (s *ServiceService) RemoveService(ctx context.Context, req *managementpb.Re pmmAgentIDs := make(map[string]struct{}) var reloadPrometheusConfig bool - if e := s.db.InTransaction(func(tx *reform.TX) error { + errTX := s.db.InTransaction(func(tx *reform.TX) error { var service *models.Service var err error switch { @@ -187,9 +127,12 @@ func (s *ServiceService) RemoveService(ctx context.Context, req *managementpb.Re } return nil - }); e != nil { - return nil, e + }) + + if errTX != nil { + return nil, errTX } + for agentID := range pmmAgentIDs { s.state.RequestStateUpdate(ctx, agentID) } @@ -201,151 +144,8 @@ func (s *ServiceService) RemoveService(ctx context.Context, req *managementpb.Re } func (s *ServiceService) checkServiceType(service *models.Service, serviceType inventorypb.ServiceType) error { - if expected, ok := serviceTypes[serviceType]; ok && expected == service.ServiceType { + if expected, ok := services.ServiceTypes[serviceType]; ok && expected == service.ServiceType { return nil } return status.Error(codes.InvalidArgument, "wrong service type") } - -func (s *ServiceService) validateRequest(request *managementpb.RemoveServiceRequest) error { - if request.ServiceName == "" && request.ServiceId == "" { - return status.Error(codes.InvalidArgument, "service_id or service_name expected") - } - if request.ServiceName != "" && request.ServiceId != "" { - return status.Error(codes.InvalidArgument, "service_id or service_name expected; not both") - } - return nil -} - -// ListServices returns a filtered list of Services with some attributes from Agents and Nodes. 
-func (s *MgmtServiceService) ListServices(ctx context.Context, req *servicev1beta1.ListServiceRequest) (*servicev1beta1.ListServiceResponse, error) { - filters := models.ServiceFilters{ - NodeID: req.NodeId, - ServiceType: convertServiceType(req.ServiceType), - ExternalGroup: req.ExternalGroup, - } - - var services []*models.Service - var agents []*models.Agent - var nodes []*models.Node - - agentToAPI := func(agent *models.Agent) *agentv1beta1.UniversalAgent { - return &agentv1beta1.UniversalAgent{ - AgentId: agent.AgentID, - AgentType: string(agent.AgentType), - Status: agent.Status, - IsConnected: s.r.IsConnected(agent.AgentID), - } - } - - query := `pg_up{collector="exporter",job=~".*_hr$"} - or mysql_up{job=~".*_hr$"} - or mongodb_up{job=~".*_hr$"} - or proxysql_up{job=~".*_hr$"} - or haproxy_backend_status{state="UP"} - ` - result, _, err := s.vmClient.Query(ctx, query, time.Now()) - if err != nil { - return nil, errors.Wrap(err, "failed to execute an instant VM query") - } - - metrics := make(map[string]statusMetrics, len(result.(model.Vector))) - for _, v := range result.(model.Vector) { //nolint:forcetypeassert - serviceID := string(v.Metric[model.LabelName("service_id")]) - serviceType := string(v.Metric[model.LabelName("service_type")]) - metrics[serviceID] = statusMetrics{status: int(v.Value), serviceType: serviceType} - } - - // TODO: provide a higher level of data consistency guarantee by using a locking mechanism. 
- errTX := s.db.InTransaction(func(tx *reform.TX) error { - var err error - services, err = models.FindServices(tx.Querier, filters) - if err != nil { - return err - } - - agents, err = models.FindAgents(tx.Querier, models.AgentFilters{}) - if err != nil { - return err - } - - nodes, err = models.FindNodes(tx.Querier, models.NodeFilters{}) - if err != nil { - return err - } - - return nil - }) - - if errTX != nil { - return nil, errTX - } - - nodeMap := make(map[string]string, len(nodes)) - for _, node := range nodes { - nodeMap[node.NodeID] = node.NodeName - } - - resultSvc := make([]*servicev1beta1.UniversalService, len(services)) - for i, service := range services { - labels, err := service.GetCustomLabels() - if err != nil { - return nil, err - } - - svc := &servicev1beta1.UniversalService{ - Address: pointer.GetString(service.Address), - Agents: []*agentv1beta1.UniversalAgent{}, - Cluster: service.Cluster, - CreatedAt: timestamppb.New(service.CreatedAt), - CustomLabels: labels, - DatabaseName: service.DatabaseName, - Environment: service.Environment, - ExternalGroup: service.ExternalGroup, - NodeId: service.NodeID, - Port: uint32(pointer.GetUint16(service.Port)), - ReplicationSet: service.ReplicationSet, - ServiceId: service.ServiceID, - ServiceType: string(service.ServiceType), - ServiceName: service.ServiceName, - Socket: pointer.GetString(service.Socket), - UpdatedAt: timestamppb.New(service.UpdatedAt), - } - - if metric, ok := metrics[service.ServiceID]; ok { - switch metric.status { - // We assume there can only be values of either 1(UP) or 0(DOWN). - case 0: - svc.Status = servicev1beta1.UniversalService_DOWN - case 1: - svc.Status = servicev1beta1.UniversalService_UP - } - } else { - // In case there is no metric, we need to assign different values for supported and unsupported service types. 
- if _, ok := supportedServices[metric.serviceType]; ok { - svc.Status = servicev1beta1.UniversalService_UNKNOWN - } else { - svc.Status = servicev1beta1.UniversalService_STATUS_INVALID - } - } - - nodeName, ok := nodeMap[service.NodeID] - if ok { - svc.NodeName = nodeName - } - - var svcAgents []*agentv1beta1.UniversalAgent - - for _, agent := range agents { - if IsNodeAgent(agent, service) || IsVMAgent(agent, service) || IsServiceAgent(agent, service) { - svcAgents = append(svcAgents, agentToAPI(agent)) - } - - svc.Agents = svcAgents - } - - resultSvc[i] = svc - } - - return &servicev1beta1.ListServiceResponse{Services: resultSvc}, nil -} diff --git a/managed/services/management/service_mgmt.go b/managed/services/management/service_mgmt.go new file mode 100644 index 0000000000..aefa0dcc63 --- /dev/null +++ b/managed/services/management/service_mgmt.go @@ -0,0 +1,218 @@ +// Copyright (C) 2017 Percona LLC +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package management + +import ( + "context" + "time" + + "github.com/AlekSi/pointer" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + "gopkg.in/reform.v1" + + "github.com/percona/pmm/api/inventorypb" + "github.com/percona/pmm/api/managementpb" + agentv1beta1 "github.com/percona/pmm/api/managementpb/agent" + servicev1beta1 "github.com/percona/pmm/api/managementpb/service" + "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/services" +) + +// A map to check if the service is supported. +// NOTE: known external services appear to match the vendor names, +// (e.g. "mysql", "mongodb", "postgresql", "proxysql", "haproxy"), +// which is why ServiceType_EXTERNAL_SERVICE is not part of this map. +var supportedServices = map[string]inventorypb.ServiceType{ + string(models.MySQLServiceType): inventorypb.ServiceType_MYSQL_SERVICE, + string(models.MongoDBServiceType): inventorypb.ServiceType_MONGODB_SERVICE, + string(models.PostgreSQLServiceType): inventorypb.ServiceType_POSTGRESQL_SERVICE, + string(models.ProxySQLServiceType): inventorypb.ServiceType_PROXYSQL_SERVICE, + string(models.HAProxyServiceType): inventorypb.ServiceType_HAPROXY_SERVICE, +} + +// MgmtServiceService is a management service for working with services. +type MgmtServiceService struct { + db *reform.DB + r agentsRegistry + state agentsStateUpdater + vmdb prometheusService + vmClient victoriaMetricsClient + + servicev1beta1.UnimplementedMgmtServiceServer +} + +type statusMetrics struct { + status int + serviceType string +} + +// NewMgmtServiceService creates MgmtServiceService instance. 
+func NewMgmtServiceService(db *reform.DB, r agentsRegistry, state agentsStateUpdater, vmdb prometheusService, vmClient victoriaMetricsClient) *MgmtServiceService { + return &MgmtServiceService{ + db: db, + r: r, + state: state, + vmdb: vmdb, + vmClient: vmClient, + } +} + +func (s *ServiceService) validateRequest(request *managementpb.RemoveServiceRequest) error { + if request.ServiceName == "" && request.ServiceId == "" { + return status.Error(codes.InvalidArgument, "service_id or service_name expected") + } + if request.ServiceName != "" && request.ServiceId != "" { + return status.Error(codes.InvalidArgument, "service_id or service_name expected; not both") + } + return nil +} + +// ListServices returns a filtered list of Services with some attributes from Agents and Nodes. +func (s *MgmtServiceService) ListServices(ctx context.Context, req *servicev1beta1.ListServiceRequest) (*servicev1beta1.ListServiceResponse, error) { + filters := models.ServiceFilters{ + NodeID: req.NodeId, + ServiceType: services.ProtoToModelServiceType(req.ServiceType), + ExternalGroup: req.ExternalGroup, + } + + agentToAPI := func(agent *models.Agent) *agentv1beta1.UniversalAgent { + return &agentv1beta1.UniversalAgent{ + AgentId: agent.AgentID, + AgentType: string(agent.AgentType), + Status: agent.Status, + IsConnected: s.r.IsConnected(agent.AgentID), + } + } + + query := `pg_up{collector="exporter",job=~".*_hr$"} + or mysql_up{job=~".*_hr$"} + or mongodb_up{job=~".*_hr$"} + or proxysql_up{job=~".*_hr$"} + or haproxy_backend_status{state="UP"} + ` + result, _, err := s.vmClient.Query(ctx, query, time.Now()) + if err != nil { + return nil, errors.Wrap(err, "failed to execute an instant VM query") + } + + metrics := make(map[string]statusMetrics, len(result.(model.Vector))) //nolint:forcetypeassert + for _, v := range result.(model.Vector) { //nolint:forcetypeassert + serviceID := string(v.Metric[model.LabelName("service_id")]) + serviceType := 
string(v.Metric[model.LabelName("service_type")]) + metrics[serviceID] = statusMetrics{status: int(v.Value), serviceType: serviceType} + } + + var ( + services []*models.Service + agents []*models.Agent + nodes []*models.Node + ) + + errTX := s.db.InTransactionContext(ctx, nil, func(tx *reform.TX) error { + var err error + services, err = models.FindServices(tx.Querier, filters) + if err != nil { + return err + } + + agents, err = models.FindAgents(tx.Querier, models.AgentFilters{}) + if err != nil { + return err + } + + nodes, err = models.FindNodes(tx.Querier, models.NodeFilters{}) + if err != nil { + return err + } + + return nil + }) + + if errTX != nil { + return nil, errTX + } + + nodeMap := make(map[string]string, len(nodes)) + for _, node := range nodes { + nodeMap[node.NodeID] = node.NodeName + } + + resultSvc := make([]*servicev1beta1.UniversalService, len(services)) + for i, service := range services { + labels, err := service.GetCustomLabels() + if err != nil { + return nil, err + } + + svc := &servicev1beta1.UniversalService{ + Address: pointer.GetString(service.Address), + Agents: []*agentv1beta1.UniversalAgent{}, + Cluster: service.Cluster, + CreatedAt: timestamppb.New(service.CreatedAt), + CustomLabels: labels, + DatabaseName: service.DatabaseName, + Environment: service.Environment, + ExternalGroup: service.ExternalGroup, + NodeId: service.NodeID, + Port: uint32(pointer.GetUint16(service.Port)), + ReplicationSet: service.ReplicationSet, + ServiceId: service.ServiceID, + ServiceType: string(service.ServiceType), + ServiceName: service.ServiceName, + Socket: pointer.GetString(service.Socket), + UpdatedAt: timestamppb.New(service.UpdatedAt), + } + + if metric, ok := metrics[service.ServiceID]; ok { + switch metric.status { + // We assume there can only be values of either 1(UP) or 0(DOWN). 
+ case 0: + svc.Status = servicev1beta1.UniversalService_DOWN + case 1: + svc.Status = servicev1beta1.UniversalService_UP + } + } else { + // In case there is no metric, we need to assign different values for supported and unsupported service types. + if _, ok := supportedServices[metric.serviceType]; ok { + svc.Status = servicev1beta1.UniversalService_UNKNOWN + } else { + svc.Status = servicev1beta1.UniversalService_STATUS_INVALID + } + } + + nodeName, ok := nodeMap[service.NodeID] + if ok { + svc.NodeName = nodeName + } + + var uAgents []*agentv1beta1.UniversalAgent + + for _, agent := range agents { + if IsNodeAgent(agent, service) || IsVMAgent(agent, service) || IsServiceAgent(agent, service) { + uAgents = append(uAgents, agentToAPI(agent)) + } + } + + svc.Agents = uAgents + resultSvc[i] = svc + } + + return &servicev1beta1.ListServiceResponse{Services: resultSvc}, nil +} diff --git a/managed/services/management/service_mgmt_test.go b/managed/services/management/service_mgmt_test.go new file mode 100644 index 0000000000..3b7d5982ea --- /dev/null +++ b/managed/services/management/service_mgmt_test.go @@ -0,0 +1,200 @@ +// Copyright (C) 2017 Percona LLC +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package management + +import ( + "context" + "testing" + + "github.com/AlekSi/pointer" + "github.com/google/uuid" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/reform.v1" + "gopkg.in/reform.v1/dialects/postgresql" + + servicev1beta1 "github.com/percona/pmm/api/managementpb/service" + "github.com/percona/pmm/managed/models" + "github.com/percona/pmm/managed/utils/logger" + "github.com/percona/pmm/managed/utils/testdb" + "github.com/percona/pmm/managed/utils/tests" +) + +func TestMgmtServiceService(t *testing.T) { + t.Run("List", func(t *testing.T) { + setup := func(t *testing.T) (context.Context, *MgmtServiceService, func(t *testing.T), *mockPrometheusService) { //nolint:unparam + t.Helper() + + ctx := logger.Set(context.Background(), t.Name()) + uuid.SetRand(&tests.IDReader{}) + + sqlDB := testdb.Open(t, models.SetupFixtures, nil) + db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) + + vmdb := &mockPrometheusService{} + vmdb.Test(t) + + state := &mockAgentsStateUpdater{} + state.Test(t) + + ar := &mockAgentsRegistry{} + ar.Test(t) + + vmClient := &mockVictoriaMetricsClient{} + + teardown := func(t *testing.T) { + uuid.SetRand(nil) + + require.NoError(t, sqlDB.Close()) + vmdb.AssertExpectations(t) + state.AssertExpectations(t) + ar.AssertExpectations(t) + } + s := NewMgmtServiceService(db, ar, state, vmdb, vmClient) + + return ctx, s, teardown, vmdb + } + + const ( + pgExporterID = "/agent_id/00000000-0000-4000-8000-000000000003" + pgStatStatementID = "/agent_id/00000000-0000-4000-8000-000000000004" + PMMAgentID = "/agent_id/00000000-0000-4000-8000-000000000007" + ) + + t.Run("Basic", func(t *testing.T) { + ctx, s, teardown, _ := setup(t) + defer teardown(t) + + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(3) + 
s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() // PMM Server Agent + s.r.(*mockAgentsRegistry).On("IsConnected", pgExporterID).Return(false).Once() // PMM Server PostgreSQL exporter + s.r.(*mockAgentsRegistry).On("IsConnected", pgStatStatementID).Return(false).Once() // PMM Server PG Stat Statements agent + response, err := s.ListServices(ctx, &servicev1beta1.ListServiceRequest{}) + + require.NoError(t, err) + assert.Len(t, response.Services, 1) // PMM Server PostgreSQL service + assert.Len(t, response.Services[0].Agents, 3) + }) + + t.Run("RDS", func(t *testing.T) { + ctx, s, teardown, _ := setup(t) + defer teardown(t) + + node, err := models.CreateNode(s.db.Querier, models.RemoteRDSNodeType, &models.CreateNodeParams{ + NodeName: "test", + Address: "test-address", + Region: pointer.ToString("test-region"), + }) + require.NoError(t, err) + + service, err := models.AddNewService(s.db.Querier, models.MySQLServiceType, &models.AddDBMSServiceParams{ + ServiceName: "test-mysql", + NodeID: node.NodeID, + Address: pointer.ToString("127.0.0.1"), + Port: pointer.ToUint16(3306), + }) + require.NoError(t, err) + + pmmAgent, err := models.CreatePMMAgent(s.db.Querier, models.PMMServerNodeID, nil) + require.NoError(t, err) + + mysqldExporter, err := models.CreateAgent(s.db.Querier, models.MySQLdExporterType, &models.CreateAgentParams{ + PMMAgentID: pmmAgent.AgentID, + ServiceID: service.ServiceID, + Password: "password", + Username: "username", + }) + require.NoError(t, err) + + rdsExporter, err := models.CreateAgent(s.db.Querier, models.RDSExporterType, &models.CreateAgentParams{ + PMMAgentID: pmmAgent.AgentID, + ServiceID: service.ServiceID, + }) + require.NoError(t, err) + + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(7) + s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() // PMM Server Agent + 
s.r.(*mockAgentsRegistry).On("IsConnected", pmmAgent.AgentID).Return(true).Once() // PMM Agent + s.r.(*mockAgentsRegistry).On("IsConnected", pgExporterID).Return(false).Once() // PMM Server PostgreSQL exporter + s.r.(*mockAgentsRegistry).On("IsConnected", pgStatStatementID).Return(false).Once() // PMM Server PG Stat Statements agent + s.r.(*mockAgentsRegistry).On("IsConnected", PMMAgentID).Return(false) // PMM Agent 2 + s.r.(*mockAgentsRegistry).On("IsConnected", mysqldExporter.AgentID).Return(false).Once() // MySQLd exporter + s.r.(*mockAgentsRegistry).On("IsConnected", rdsExporter.AgentID).Return(false).Once() // RDS exporter + + response, err := s.ListServices(ctx, &servicev1beta1.ListServiceRequest{}) + + require.NoError(t, err) + assert.Len(t, response.Services, 2) // PMM Server PostgreSQL service, MySQL service + assert.Len(t, response.Services[0].Agents, 4) + assert.Len(t, response.Services[1].Agents, 2) + }) + + t.Run("Azure", func(t *testing.T) { + ctx, s, teardown, _ := setup(t) + defer teardown(t) + + node, err := models.CreateNode(s.db.Querier, models.RemoteAzureDatabaseNodeType, &models.CreateNodeParams{ + NodeName: "test", + Address: "test-address", + Region: pointer.ToString("test-region"), + }) + require.NoError(t, err) + + service, err := models.AddNewService(s.db.Querier, models.MySQLServiceType, &models.AddDBMSServiceParams{ + ServiceName: "test-mysql", + NodeID: node.NodeID, + Address: pointer.ToString("127.0.0.1"), + Port: pointer.ToUint16(3306), + }) + require.NoError(t, err) + + pmmAgent, err := models.CreatePMMAgent(s.db.Querier, models.PMMServerNodeID, nil) + require.NoError(t, err) + + mysqldExporter, err := models.CreateAgent(s.db.Querier, models.MySQLdExporterType, &models.CreateAgentParams{ + PMMAgentID: pmmAgent.AgentID, + ServiceID: service.ServiceID, + Password: "password", + Username: "username", + }) + require.NoError(t, err) + + azureExporter, err := models.CreateAgent(s.db.Querier, models.AzureDatabaseExporterType, 
&models.CreateAgentParams{ + PMMAgentID: pmmAgent.AgentID, + ServiceID: service.ServiceID, + }) + require.NoError(t, err) + + s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(7) + s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() // PMM Server Agent + s.r.(*mockAgentsRegistry).On("IsConnected", pmmAgent.AgentID).Return(true).Once() // PMM Agent + s.r.(*mockAgentsRegistry).On("IsConnected", pgExporterID).Return(false).Once() // PMM Server PostgreSQL exporter + s.r.(*mockAgentsRegistry).On("IsConnected", pgStatStatementID).Return(false).Once() // PMM Server PG Stat Statements agent + s.r.(*mockAgentsRegistry).On("IsConnected", PMMAgentID).Return(false) // PMM Agent 2 + s.r.(*mockAgentsRegistry).On("IsConnected", mysqldExporter.AgentID).Return(false).Once() // MySQLd exporter + s.r.(*mockAgentsRegistry).On("IsConnected", azureExporter.AgentID).Return(false).Once() // Azure exporter + + response, err := s.ListServices(ctx, &servicev1beta1.ListServiceRequest{}) + + require.NoError(t, err) + assert.Len(t, response.Services, 2) // PMM Server PostgreSQL service, MySQL service + assert.Len(t, response.Services[0].Agents, 4) + assert.Len(t, response.Services[1].Agents, 2) + }) + }) +} diff --git a/managed/services/management/service_test.go b/managed/services/management/service_test.go index 6b18ea1b33..5e9034ff31 100644 --- a/managed/services/management/service_test.go +++ b/managed/services/management/service_test.go @@ -22,9 +22,7 @@ import ( "github.com/AlekSi/pointer" "github.com/google/uuid" - "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -33,7 +31,6 @@ import ( "github.com/percona/pmm/api/inventorypb" "github.com/percona/pmm/api/managementpb" - servicev1beta1 
"github.com/percona/pmm/api/managementpb/service" "github.com/percona/pmm/managed/models" "github.com/percona/pmm/managed/utils/logger" "github.com/percona/pmm/managed/utils/testdb" @@ -262,166 +259,4 @@ func TestServiceService(t *testing.T) { tests.AssertGRPCError(t, status.New(codes.NotFound, fmt.Sprintf(`Node with ID "%s" not found.`, node.NodeID)), err) }) }) - - t.Run("List", func(t *testing.T) { - setup := func(t *testing.T) (context.Context, *MgmtServiceService, func(t *testing.T), *mockPrometheusService) { //nolint:unparam - t.Helper() - - ctx := logger.Set(context.Background(), t.Name()) - uuid.SetRand(&tests.IDReader{}) - - sqlDB := testdb.Open(t, models.SetupFixtures, nil) - db := reform.NewDB(sqlDB, postgresql.Dialect, reform.NewPrintfLogger(t.Logf)) - - vmdb := &mockPrometheusService{} - vmdb.Test(t) - - state := &mockAgentsStateUpdater{} - state.Test(t) - - ar := &mockAgentsRegistry{} - ar.Test(t) - - vmClient := &mockVictoriaMetricsClient{} - - teardown := func(t *testing.T) { - uuid.SetRand(nil) - - require.NoError(t, sqlDB.Close()) - vmdb.AssertExpectations(t) - state.AssertExpectations(t) - ar.AssertExpectations(t) - } - s := NewMgmtServiceService(db, ar, state, vmdb, vmClient) - - return ctx, s, teardown, vmdb - } - - const ( - pgExporterID = "/agent_id/00000000-0000-4000-8000-000000000003" - pgStatStatementID = "/agent_id/00000000-0000-4000-8000-000000000004" - PMMAgentID = "/agent_id/00000000-0000-4000-8000-000000000007" - ) - - t.Run("Basic", func(t *testing.T) { - ctx, s, teardown, _ := setup(t) - defer teardown(t) - - s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(3) - s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() // PMM Server Agent - s.r.(*mockAgentsRegistry).On("IsConnected", pgExporterID).Return(false).Once() // PMM Server PostgreSQL exporter - s.r.(*mockAgentsRegistry).On("IsConnected", 
pgStatStatementID).Return(false).Once() // PMM Server PG Stat Statements agent - response, err := s.ListServices(ctx, &servicev1beta1.ListServiceRequest{}) - - require.NoError(t, err) - assert.Len(t, response.Services, 1) // PMM Server PostgreSQL service - assert.Len(t, response.Services[0].Agents, 3) - }) - - t.Run("RDS", func(t *testing.T) { - ctx, s, teardown, _ := setup(t) - defer teardown(t) - - node, err := models.CreateNode(s.db.Querier, models.RemoteRDSNodeType, &models.CreateNodeParams{ - NodeName: "test", - Address: "test-address", - Region: pointer.ToString("test-region"), - }) - require.NoError(t, err) - - service, err := models.AddNewService(s.db.Querier, models.MySQLServiceType, &models.AddDBMSServiceParams{ - ServiceName: "test-mysql", - NodeID: node.NodeID, - Address: pointer.ToString("127.0.0.1"), - Port: pointer.ToUint16(3306), - }) - require.NoError(t, err) - - pmmAgent, err := models.CreatePMMAgent(s.db.Querier, models.PMMServerNodeID, nil) - require.NoError(t, err) - - mysqldExporter, err := models.CreateAgent(s.db.Querier, models.MySQLdExporterType, &models.CreateAgentParams{ - PMMAgentID: pmmAgent.AgentID, - ServiceID: service.ServiceID, - Password: "password", - Username: "username", - }) - require.NoError(t, err) - - rdsExporter, err := models.CreateAgent(s.db.Querier, models.RDSExporterType, &models.CreateAgentParams{ - PMMAgentID: pmmAgent.AgentID, - ServiceID: service.ServiceID, - }) - require.NoError(t, err) - - s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(7) - s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() // PMM Server Agent - s.r.(*mockAgentsRegistry).On("IsConnected", pmmAgent.AgentID).Return(true).Once() // PMM Agent - s.r.(*mockAgentsRegistry).On("IsConnected", pgExporterID).Return(false).Once() // PMM Server PostgreSQL exporter - s.r.(*mockAgentsRegistry).On("IsConnected", 
pgStatStatementID).Return(false).Once() // PMM Server PG Stat Statements agent - s.r.(*mockAgentsRegistry).On("IsConnected", PMMAgentID).Return(false) // PMM Agent 2 - s.r.(*mockAgentsRegistry).On("IsConnected", mysqldExporter.AgentID).Return(false).Once() // MySQLd exporter - s.r.(*mockAgentsRegistry).On("IsConnected", rdsExporter.AgentID).Return(false).Once() // RDS exporter - - response, err := s.ListServices(ctx, &servicev1beta1.ListServiceRequest{}) - - require.NoError(t, err) - assert.Len(t, response.Services, 2) // PMM Server PostgreSQL service, MySQL service - assert.Len(t, response.Services[0].Agents, 4) - assert.Len(t, response.Services[1].Agents, 2) - }) - - t.Run("Azure", func(t *testing.T) { - ctx, s, teardown, _ := setup(t) - defer teardown(t) - - node, err := models.CreateNode(s.db.Querier, models.RemoteAzureDatabaseNodeType, &models.CreateNodeParams{ - NodeName: "test", - Address: "test-address", - Region: pointer.ToString("test-region"), - }) - require.NoError(t, err) - - service, err := models.AddNewService(s.db.Querier, models.MySQLServiceType, &models.AddDBMSServiceParams{ - ServiceName: "test-mysql", - NodeID: node.NodeID, - Address: pointer.ToString("127.0.0.1"), - Port: pointer.ToUint16(3306), - }) - require.NoError(t, err) - - pmmAgent, err := models.CreatePMMAgent(s.db.Querier, models.PMMServerNodeID, nil) - require.NoError(t, err) - - mysqldExporter, err := models.CreateAgent(s.db.Querier, models.MySQLdExporterType, &models.CreateAgentParams{ - PMMAgentID: pmmAgent.AgentID, - ServiceID: service.ServiceID, - Password: "password", - Username: "username", - }) - require.NoError(t, err) - - azureExporter, err := models.CreateAgent(s.db.Querier, models.AzureDatabaseExporterType, &models.CreateAgentParams{ - PMMAgentID: pmmAgent.AgentID, - ServiceID: service.ServiceID, - }) - require.NoError(t, err) - - s.vmClient.(*mockVictoriaMetricsClient).On("Query", ctx, mock.Anything, mock.Anything).Return(model.Vector{}, nil, nil).Times(7) - 
s.r.(*mockAgentsRegistry).On("IsConnected", models.PMMServerAgentID).Return(true).Once() // PMM Server Agent - s.r.(*mockAgentsRegistry).On("IsConnected", pmmAgent.AgentID).Return(true).Once() // PMM Agent - s.r.(*mockAgentsRegistry).On("IsConnected", pgExporterID).Return(false).Once() // PMM Server PostgreSQL exporter - s.r.(*mockAgentsRegistry).On("IsConnected", pgStatStatementID).Return(false).Once() // PMM Server PG Stat Statements agent - s.r.(*mockAgentsRegistry).On("IsConnected", PMMAgentID).Return(false) // PMM Agent 2 - s.r.(*mockAgentsRegistry).On("IsConnected", mysqldExporter.AgentID).Return(false).Once() // MySQLd exporter - s.r.(*mockAgentsRegistry).On("IsConnected", azureExporter.AgentID).Return(false).Once() // Azure exporter - - response, err := s.ListServices(ctx, &servicev1beta1.ListServiceRequest{}) - - require.NoError(t, err) - assert.Len(t, response.Services, 2) // PMM Server PostgreSQL service, MySQL service - assert.Len(t, response.Services[0].Agents, 4) - assert.Len(t, response.Services[1].Agents, 2) - }) - }) } diff --git a/managed/services/management/utils.go b/managed/services/management/utils.go index 85f6350968..246535709d 100644 --- a/managed/services/management/utils.go +++ b/managed/services/management/utils.go @@ -21,7 +21,7 @@ import ( "github.com/percona/pmm/managed/models" ) -// IsNodeAgent checks if agent runs on the same node as the service (p.e. pmm-agent). +// IsNodeAgent checks if agent runs on the same node as the service (e.g. pmm-agent). 
func IsNodeAgent(agent *models.Agent, service *models.Service) bool { return agent.ServiceID == nil && pointer.GetString(agent.RunsOnNodeID) == service.NodeID } diff --git a/managed/services/minio/client.go b/managed/services/minio/client.go index a508132c4a..103ac3b19d 100644 --- a/managed/services/minio/client.go +++ b/managed/services/minio/client.go @@ -122,6 +122,15 @@ func (c *Client) RemoveRecursive(ctx context.Context, endpoint, accessKey, secre return nil } +// Remove removes single objects from storage. +func (c *Client) Remove(ctx context.Context, endpoint, accessKey, secretKey, bucketName, objectName string) error { + mc, err := createMinioClient(endpoint, accessKey, secretKey) + if err != nil { + return err + } + return mc.RemoveObject(ctx, bucketName, objectName, minio.RemoveObjectOptions{}) +} + // List is a wrapper over the minio API to list all objects in the bucket. // It scans path with prefix and returns all files with given suffix. // Both prefix and suffix can be omitted. 
diff --git a/managed/services/onboarding/tips.go b/managed/services/onboarding/tips.go index f7ef8e2775..011ee69181 100644 --- a/managed/services/onboarding/tips.go +++ b/managed/services/onboarding/tips.go @@ -95,7 +95,7 @@ func (t *TipsService) retrieveSystemTips() ([]*onboardingpb.TipModel, error) { } tips := make([]*models.OnboardingSystemTip, len(structs)) for i, s := range structs { - tips[i] = s.(*models.OnboardingSystemTip) + tips[i] = s.(*models.OnboardingSystemTip) //nolint:forcetypeassert } for _, tip := range tips { @@ -201,7 +201,7 @@ func (t *TipsService) retrieveUserTips(userID int) ([]*onboardingpb.TipModel, er func (t *TipsService) retrieveOrCreateUserTip(tx *reform.TX, userID int, tipID int64) (*models.OnboardingUserTip, error) { retrievedUserTip, err := t.retrieveUserTip(tx, userID, tipID) if err != nil { - if err == reform.ErrNoRows { + if errors.Is(err, reform.ErrNoRows) { retrievedUserTip, err = t.createUserTip(tx, userID, tipID) if err != nil { return nil, err @@ -216,13 +216,13 @@ func (t *TipsService) retrieveOrCreateUserTip(tx *reform.TX, userID int, tipID i func (t *TipsService) retrieveUserTip(tx *reform.TX, userID int, tipID int64) (*models.OnboardingUserTip, error) { res, err := tx.Querier.SelectOneFrom(models.OnboardingUserTipTable, "WHERE user_id = $1 AND tip_id = $2", userID, tipID) if err != nil { - if err == reform.ErrNoRows { + if errors.Is(err, reform.ErrNoRows) { return nil, err } return nil, errors.Wrap(err, "failed to retrieve system tip by id") } - return res.(*models.OnboardingUserTip), nil + return res.(*models.OnboardingUserTip), nil //nolint:forcetypeassert } func (t *TipsService) createUserTip(tx *reform.TX, userID int, tipID int64) (*models.OnboardingUserTip, error) { diff --git a/managed/services/scheduler/scheduler.go b/managed/services/scheduler/scheduler.go index 5a10d481ea..d5d6b2b274 100644 --- a/managed/services/scheduler/scheduler.go +++ b/managed/services/scheduler/scheduler.go @@ -322,6 +322,7 @@ func (s 
*Service) convertDBTask(dbTask *models.ScheduledTask) (Task, error) { Retention: data.Retention, Retries: data.Retries, RetryInterval: data.RetryInterval, + Folder: data.Folder, }, } case models.ScheduledMongoDBBackupTask: @@ -340,6 +341,7 @@ func (s *Service) convertDBTask(dbTask *models.ScheduledTask) (Task, error) { Retention: data.Retention, Retries: data.Retries, RetryInterval: data.RetryInterval, + Folder: data.Folder, }, } diff --git a/managed/services/scheduler/task.go b/managed/services/scheduler/task.go index 5a05ea4fb5..b0df978bdb 100644 --- a/managed/services/scheduler/task.go +++ b/managed/services/scheduler/task.go @@ -54,6 +54,7 @@ type BackupTaskParams struct { Retention uint32 Retries uint32 RetryInterval time.Duration + Folder string } // Validate checks backup task parameters for correctness. @@ -111,6 +112,7 @@ func (t *mySQLBackupTask) Run(ctx context.Context, scheduler *Service) error { ScheduleID: t.ID(), Retries: t.Retries, RetryInterval: t.RetryInterval, + Folder: t.Folder, }) return err } @@ -133,6 +135,7 @@ func (t *mySQLBackupTask) Data() *models.ScheduledTaskData { Mode: t.Mode, Retries: t.Retries, RetryInterval: t.RetryInterval, + Folder: t.Folder, }, }, } @@ -172,6 +175,7 @@ func (t *mongoDBBackupTask) Run(ctx context.Context, scheduler *Service) error { ScheduleID: t.ID(), Retries: t.Retries, RetryInterval: t.RetryInterval, + Folder: t.Folder, }) return err } @@ -194,6 +198,7 @@ func (t *mongoDBBackupTask) Data() *models.ScheduledTaskData { Retention: t.Retention, Retries: t.Retries, RetryInterval: t.RetryInterval, + Folder: t.Folder, }, }, } diff --git a/managed/services/supervisord/logs.go b/managed/services/supervisord/logs.go index 1b78998bd0..1e9ace820f 100644 --- a/managed/services/supervisord/logs.go +++ b/managed/services/supervisord/logs.go @@ -324,7 +324,7 @@ func readLog(name string, maxLines int, maxBytes int64) ([]byte, time.Time, erro res := make([]byte, 0, maxBytes) r.Do(func(v interface{}) { if v != nil { - res = 
append(res, v.([]byte)...) + res = append(res, v.([]byte)...) //nolint:forcetypeassert } }) return res, m, nil diff --git a/managed/services/supervisord/pmm_config.go b/managed/services/supervisord/pmm_config.go index f7e52bbe4b..0ffda3f980 100644 --- a/managed/services/supervisord/pmm_config.go +++ b/managed/services/supervisord/pmm_config.go @@ -113,6 +113,7 @@ command = -c pg_stat_statements.max=10000 -c pg_stat_statements.track=all -c pg_stat_statements.save=off + -c logging_collector=off user = postgres autorestart = true autostart = true diff --git a/managed/services/telemetry/config.default.yml b/managed/services/telemetry/config.default.yml index a80437355b..4d63fc7da4 100644 --- a/managed/services/telemetry/config.default.yml +++ b/managed/services/telemetry/config.default.yml @@ -284,6 +284,40 @@ telemetry: - metric_name: "postgresql_version" label: "short_version" + - id: PostgresDBInstalledExtensions + source: VM + query: pg_extensions{service_name!="pmm-server-postgresql"} + transform: + type: JSON + metric: postgresql_installed_extensions + summary: "Installed extensions on the PostgresSQL instance" + data: + - metric_name: "1" + label: "name" + - metric_name: "2" + label: "version" + - metric_name: "3" + label: "relocatable" + - metric_name: "4" + label: "service_id" + + - id: PostgresDBAvailableExtensions + source: VM + query: pg_available_extensions{service_name!="pmm-server-postgresql"} + transform: + type: JSON + metric: postgresql_available_extensions + summary: "Available extensions on the PostgresSQL instance" + data: + - metric_name: "1" + label: "name" + - metric_name: "2" + label: "default_version" + - metric_name: "3" + label: "installed_version" + - metric_name: "4" + label: "service_id" + - id: HAProxyVersion source: VM query: haproxy_process_build_info @@ -608,6 +642,14 @@ telemetry: - metric_name: "pmm_server_node_os_version" label: "release" + - id: PMMOSNameAndVersion + source: VM + query: node_os_info{node_id=~"pmm-server"} + 
summary: "PMM node OS name and version" + data: + - metric_name: "pmm_server_node_os_pretty_name_and_version" + label: "pretty_name" + # DBaaS - id: DBaaSServicesCount source: PMMDB_SELECT diff --git a/managed/services/utils.go b/managed/services/utils.go index 50ba2c51b2..c67ba0518f 100644 --- a/managed/services/utils.go +++ b/managed/services/utils.go @@ -20,11 +20,15 @@ import ( "github.com/sirupsen/logrus" ) -// GroupChecksByDB splits provided checks by database and returns three slices: for MySQL, for PostgreSQL and for MongoDB. -func GroupChecksByDB(l *logrus.Entry, checks map[string]check.Check) (mySQLChecks, postgreSQLChecks, mongoDBChecks map[string]check.Check) { - mySQLChecks = make(map[string]check.Check) - postgreSQLChecks = make(map[string]check.Check) - mongoDBChecks = make(map[string]check.Check) +// GroupChecksByDB splits provided checks by database and returns three +// slices: for MySQL, for PostgreSQL and for MongoDB. +func GroupChecksByDB( + l *logrus.Entry, + checks map[string]check.Check, +) (map[string]check.Check, map[string]check.Check, map[string]check.Check) { + mySQLChecks := make(map[string]check.Check) + postgreSQLChecks := make(map[string]check.Check) + mongoDBChecks := make(map[string]check.Check) for _, c := range checks { switch c.Version { case 1: @@ -67,5 +71,5 @@ func GroupChecksByDB(l *logrus.Entry, checks map[string]check.Check) (mySQLCheck } } - return + return mySQLChecks, postgreSQLChecks, mongoDBChecks } diff --git a/managed/services/victoriametrics/scrape_configs.go b/managed/services/victoriametrics/scrape_configs.go index 0b2fcdc70b..977a7b5dbd 100644 --- a/managed/services/victoriametrics/scrape_configs.go +++ b/managed/services/victoriametrics/scrape_configs.go @@ -239,6 +239,7 @@ func scrapeConfigsForNodeExporter(s *models.MetricsResolutions, params *scrapeCo "entropy", "textfile.lr", "uname", + "os", } lrCollect = collectors.FilterOutCollectors("", lrCollect, params.agent.DisabledCollectors) lr, err = 
scrapeConfigForStandardExporter("lr", s.LR, params, lrCollect) @@ -419,6 +420,7 @@ func scrapeConfigsForPostgresExporter(s *models.MetricsResolutions, params *scra "custom_query.hr", "standard.go", "standard.process", + "postgres", } hrOptions = collectors.FilterOutCollectors("", hrOptions, params.agent.DisabledCollectors) hr, err := scrapeConfigForStandardExporter("hr", s.HR, params, hrOptions) diff --git a/managed/services/victoriametrics/scrape_configs_test.go b/managed/services/victoriametrics/scrape_configs_test.go index 7eebe341e8..7969431400 100644 --- a/managed/services/victoriametrics/scrape_configs_test.go +++ b/managed/services/victoriametrics/scrape_configs_test.go @@ -153,6 +153,7 @@ func TestScrapeConfig(t *testing.T) { }, Params: url.Values{"collect[]": []string{ "bonding", + "os", "textfile.lr", "uname", }}, @@ -863,6 +864,7 @@ func TestScrapeConfig(t *testing.T) { Params: url.Values{"collect[]": []string{ "custom_query.hr", "exporter", + "postgres", "standard.go", }}, }, { diff --git a/managed/services/victoriametrics/victoriametrics_test.go b/managed/services/victoriametrics/victoriametrics_test.go index cc6a67a70c..b8e2e6a2c7 100644 --- a/managed/services/victoriametrics/victoriametrics_test.go +++ b/managed/services/victoriametrics/victoriametrics_test.go @@ -638,6 +638,7 @@ scrape_configs: collect[]: - custom_query.hr - exporter + - postgres - standard.go - standard.process scrape_interval: 5s diff --git a/managed/services/vmalert/external_rules.go b/managed/services/vmalert/external_rules.go index b65cca1f80..e87030895e 100644 --- a/managed/services/vmalert/external_rules.go +++ b/managed/services/vmalert/external_rules.go @@ -43,7 +43,7 @@ func NewExternalRules() *ExternalRules { // ValidateRules validates alerting rules. 
func (s *ExternalRules) ValidateRules(ctx context.Context, rules string) error { err := validators.ValidateAlertingRules(ctx, rules) - if e, ok := err.(*validators.InvalidAlertingRuleError); ok { + if e, ok := err.(*validators.InvalidAlertingRuleError); ok { //nolint:errorlint return status.Errorf(codes.InvalidArgument, e.Msg) } return err diff --git a/managed/testdata/supervisord.d/pmm-db_enabled.ini b/managed/testdata/supervisord.d/pmm-db_enabled.ini index 88e4de1e8d..4d4ad4afd9 100644 --- a/managed/testdata/supervisord.d/pmm-db_enabled.ini +++ b/managed/testdata/supervisord.d/pmm-db_enabled.ini @@ -33,6 +33,7 @@ command = -c pg_stat_statements.max=10000 -c pg_stat_statements.track=all -c pg_stat_statements.save=off + -c logging_collector=off user = postgres autorestart = true autostart = true diff --git a/managed/utils/envvars/parser.go b/managed/utils/envvars/parser.go index 4d05a1a42f..32b66e410b 100644 --- a/managed/utils/envvars/parser.go +++ b/managed/utils/envvars/parser.go @@ -69,7 +69,7 @@ func (e InvalidDurationError) Error() string { return string(e) } // - the environment variables prefixed with GF_ passed as related to Grafana. 
// - the environment variables relating to proxies // - the environment variable set by podman -func ParseEnvVars(envs []string) (envSettings *models.ChangeSettingsParams, errs []error, warns []string) { //nolint:cyclop +func ParseEnvVars(envs []string) (envSettings *models.ChangeSettingsParams, errs []error, warns []string) { //nolint:cyclop,nonamedreturns envSettings = &models.ChangeSettingsParams{} for _, env := range envs { @@ -321,7 +321,7 @@ func GetEnv(key, fallback string) string { } func formatEnvVariableError(err error, env, value string) error { - switch e := err.(type) { + switch e := err.(type) { //nolint:errorlint case InvalidDurationError: return fmt.Errorf("environment variable %q has invalid duration %s", env, value) default: diff --git a/managed/utils/interceptors/grpc_extension.go b/managed/utils/interceptors/grpc_extension.go index 4e9cf10b87..83455c85e5 100644 --- a/managed/utils/interceptors/grpc_extension.go +++ b/managed/utils/interceptors/grpc_extension.go @@ -61,7 +61,7 @@ func getCallerOriginStr(ctx context.Context) string { if v == nil { return "" } - return string(v.(callerOrigin)) + return string(v.(callerOrigin)) //nolint:forcetypeassert } func callerOriginFromRequest(ctx context.Context, method string) callerOrigin { diff --git a/managed/utils/logger/global.go b/managed/utils/logger/global.go index fd229fcb13..2d9a50c7c2 100644 --- a/managed/utils/logger/global.go +++ b/managed/utils/logger/global.go @@ -32,16 +32,16 @@ func SetupGlobalLogger() { FullTimestamp: true, TimestampFormat: "2006-01-02T15:04:05.000-07:00", - CallerPrettyfier: func(f *runtime.Frame) (function string, file string) { - _, function = filepath.Split(f.Function) + CallerPrettyfier: func(f *runtime.Frame) (string, string) { + _, function := filepath.Split(f.Function) // keep a single directory name as a compromise between brevity and unambiguity var dir string - dir, file = filepath.Split(f.File) + dir, file := filepath.Split(f.File) dir = filepath.Base(dir) file 
= fmt.Sprintf("%s/%s:%d", dir, file, f.Line) - return + return function, file }, }) } diff --git a/managed/utils/logger/logger.go b/managed/utils/logger/logger.go index 2c9ebb9c70..b47b60f28e 100644 --- a/managed/utils/logger/logger.go +++ b/managed/utils/logger/logger.go @@ -32,7 +32,7 @@ func Get(ctx context.Context) *logrus.Entry { if v == nil { panic("context logger not set") } - return v.(*logrus.Entry) + return v.(*logrus.Entry) //nolint:forcetypeassert } // Set returns derived context with set logrus entry with given request ID. diff --git a/managed/utils/tests/aws.go b/managed/utils/tests/aws.go index 816727246c..8537fa1a72 100644 --- a/managed/utils/tests/aws.go +++ b/managed/utils/tests/aws.go @@ -21,12 +21,12 @@ import ( ) // GetAWSKeys returns testing AWS keys. -func GetAWSKeys(tb testing.TB) (accessKey, secretKey string) { +func GetAWSKeys(tb testing.TB) (string, string) { tb.Helper() - accessKey, secretKey = os.Getenv("AWS_ACCESS_KEY"), os.Getenv("AWS_SECRET_KEY") + accessKey, secretKey := os.Getenv("AWS_ACCESS_KEY"), os.Getenv("AWS_SECRET_KEY") if accessKey == "" || secretKey == "" { tb.Skip("Environment variables AWS_ACCESS_KEY / AWS_SECRET_KEY are not defined, skipping test") } - return + return accessKey, secretKey } diff --git a/managed/utils/validators/alerting_rules.go b/managed/utils/validators/alerting_rules.go index c11a7a7648..81fb8aa66f 100644 --- a/managed/utils/validators/alerting_rules.go +++ b/managed/utils/validators/alerting_rules.go @@ -63,7 +63,7 @@ func ValidateAlertingRules(ctx context.Context, rules string) error { b, err := cmd.CombinedOutput() logrus.Debugf("ValidateAlertingRules: %v\n%s", err, b) if err != nil { - if e, ok := err.(*exec.ExitError); ok && e.ExitCode() != 0 { + if e, ok := err.(*exec.ExitError); ok && e.ExitCode() != 0 { //nolint:errorlint return &InvalidAlertingRuleError{ Msg: "Invalid alerting rules.", } diff --git a/qan-api2/db.go b/qan-api2/db.go index 79309159d6..d1845de681 100644 --- a/qan-api2/db.go +++ 
b/qan-api2/db.go @@ -40,7 +40,7 @@ const ( func NewDB(dsn string, maxIdleConns, maxOpenConns int) *sqlx.DB { db, err := sqlx.Connect("clickhouse", dsn) if err != nil { - if exception, ok := err.(*clickhouse.Exception); ok && exception.Code == databaseNotExistErrorCode { + if exception, ok := err.(*clickhouse.Exception); ok && exception.Code == databaseNotExistErrorCode { //nolint:errorlint err = createDB(dsn) if err != nil { log.Fatalf("Database wasn't created: %v", err) diff --git a/qan-api2/main.go b/qan-api2/main.go index c9dbb97812..79feac9b1b 100644 --- a/qan-api2/main.go +++ b/qan-api2/main.go @@ -286,16 +286,16 @@ func main() { FullTimestamp: true, TimestampFormat: "2006-01-02T15:04:05.000-07:00", - CallerPrettyfier: func(f *runtime.Frame) (function string, file string) { - _, function = filepath.Split(f.Function) + CallerPrettyfier: func(f *runtime.Frame) (string, string) { + _, function := filepath.Split(f.Function) // keep a single directory name as a compromise between brevity and unambiguity var dir string - dir, file = filepath.Split(f.File) + dir, file := filepath.Split(f.File) dir = filepath.Base(dir) file = fmt.Sprintf("%s/%s:%d", dir, file, f.Line) - return + return function, file }, }) @@ -332,7 +332,7 @@ func main() { go func() { s := <-signals signal.Stop(signals) - log.Printf("Got %s, shutting down...\n", unix.SignalName(s.(unix.Signal))) + log.Printf("Got %s, shutting down...\n", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert cancel() }() diff --git a/qan-api2/models/data_ingestion.go b/qan-api2/models/data_ingestion.go index d512b1507d..8df0225aaf 100644 --- a/qan-api2/models/data_ingestion.go +++ b/qan-api2/models/data_ingestion.go @@ -725,27 +725,27 @@ func (mb *MetricsBucket) Save(agentMsg *qanpb.CollectRequest) error { //nolint:u } // mapToArrsStrStr converts map into two lists. 
-func mapToArrsStrStr(m map[string]string) (keys []string, values []string) { - keys = make([]string, 0, len(m)) - values = make([]string, 0, len(m)) +func mapToArrsStrStr(m map[string]string) ([]string, []string) { + keys := make([]string, 0, len(m)) + values := make([]string, 0, len(m)) for k, v := range m { keys = append(keys, k) values = append(values, v) } - return + return keys, values } // mapToArrsIntInt converts map into two lists. -func mapToArrsIntInt(m map[uint64]uint64) (keys []uint64, values []uint64) { - keys = make([]uint64, 0, len(m)) - values = make([]uint64, 0, len(m)) +func mapToArrsIntInt(m map[uint64]uint64) ([]uint64, []uint64) { + keys := make([]uint64, 0, len(m)) + values := make([]uint64, 0, len(m)) for k, v := range m { keys = append(keys, k) values = append(values, v) } - return + return keys, values } // check interfaces diff --git a/qan-api2/models/metrics.go b/qan-api2/models/metrics.go index 98c88bc19e..169c281549 100644 --- a/qan-api2/models/metrics.go +++ b/qan-api2/models/metrics.go @@ -811,7 +811,7 @@ func (m *Metrics) GetFingerprintByQueryID(ctx context.Context, queryID string) ( var fingerprint string err := m.db.GetContext(queryCtx, &fingerprint, fingerprintByQueryID, []interface{}{queryID}...) 
if err != nil && !errors.Is(err, sql.ErrNoRows) { - return "", fmt.Errorf("QueryxContext error:%v", err) + return "", fmt.Errorf("QueryxContext error:%v", err) //nolint:errorlint } return fingerprint, nil @@ -827,7 +827,7 @@ func (m *Metrics) SelectQueryPlan(ctx context.Context, queryID string) (*qanpb.Q var res qanpb.QueryPlanReply err := m.db.GetContext(queryCtx, &res, planByQueryID, []interface{}{queryID}) //nolint:asasalint if err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, fmt.Errorf("QueryxContext error:%v", err) + return nil, fmt.Errorf("QueryxContext error:%v", err) //nolint:errorlint } return &res, nil @@ -978,7 +978,7 @@ func (m *Metrics) QueryExists(ctx context.Context, serviceID, query string) (boo return false, nil } -const queryByQueryIDTmpl = `SELECT fingerprint, placeholders_count FROM metrics +const queryByQueryIDTmpl = `SELECT explain_fingerprint, fingerprint, placeholders_count FROM metrics WHERE service_id = :service_id AND queryid = :query_id LIMIT 1; ` @@ -1012,17 +1012,135 @@ func (m *Metrics) ExplainFingerprintByQueryID(ctx context.Context, serviceID, qu } defer rows.Close() //nolint:errcheck + var fingerprint string for rows.Next() { err = rows.Scan( &res.ExplainFingerprint, + &fingerprint, &res.PlaceholdersCount) if err != nil { return res, errors.Wrap(err, "failed to scan query") } + if res.ExplainFingerprint == "" { + res.ExplainFingerprint = fingerprint + } + return res, nil //nolint:staticcheck } return res, errors.New("query_id doesnt exists") } + +const selectedQueryMetadataTmpl = ` +SELECT any(service_name), + any(database), + any(schema), + any(username), + any(replication_set), + any(cluster), + any(service_type), + any(service_id), + any(environment), + any(node_id), + any(node_name), + any(node_type) +FROM metrics +WHERE period_start >= :period_start_from AND period_start <= :period_start_to +{{ if not .Totals }} AND {{ .Group }} = '{{ .DimensionVal }}' +{{ end }} +{{ if .Dimensions }} + {{range $key, $vals := 
.Dimensions }} + AND {{ $key }} IN ( '{{ StringsJoin $vals "', '" }}' ) + {{ end }} +{{ end }} +{{ if .Labels }}{{$i := 0}} + AND ({{range $key, $vals := .Labels }}{{ $i = inc $i}} + {{ if gt $i 1}} OR {{ end }} has(['{{ StringsJoin $vals "', '" }}'], labels.value[indexOf(labels.key, '{{ $key }}')]) + {{ end }}) +{{ end }} +{{ if not .Totals }} GROUP BY {{ .Group }} +{{ end }} +WITH TOTALS; +` + +// GetSelectedQueryMetadata returns metadata for given query ID. +func (m *Metrics) GetSelectedQueryMetadata(ctx context.Context, periodStartFromSec, periodStartToSec int64, filter, group string, + dimensions, labels map[string][]string, totals bool, +) (*qanpb.GetSelectedQueryMetadataReply, error) { + arg := map[string]interface{}{ + "period_start_from": periodStartFromSec, + "period_start_to": periodStartToSec, + } + + tmplArgs := struct { + PeriodStartFrom int64 + PeriodStartTo int64 + PeriodDuration int64 + Dimensions map[string][]string + Labels map[string][]string + DimensionVal string + Group string + Totals bool + }{ + PeriodStartFrom: periodStartFromSec, + PeriodStartTo: periodStartToSec, + PeriodDuration: periodStartToSec - periodStartFromSec, + Dimensions: escapeColonsInMap(dimensions), + Labels: escapeColonsInMap(labels), + DimensionVal: escapeColons(filter), + Group: group, + Totals: totals, + } + + res := &qanpb.GetSelectedQueryMetadataReply{} + var queryBuffer bytes.Buffer + if tmpl, err := template.New("selectedQueryMetadataTmpl").Funcs(funcMap).Parse(selectedQueryMetadataTmpl); err != nil { + return res, errors.Wrap(err, cannotPrepare) + } else if err = tmpl.Execute(&queryBuffer, tmplArgs); err != nil { + return res, errors.Wrap(err, cannotExecute) + } + + query, args, err := sqlx.Named(queryBuffer.String(), arg) + if err != nil { + return res, errors.Wrap(err, cannotPrepare) + } + query, args, err = sqlx.In(query, args...) 
+ if err != nil { + return res, errors.Wrap(err, cannotPopulate) + } + query = m.db.Rebind(query) + + queryCtx, cancel := context.WithTimeout(ctx, queryTimeout) + defer cancel() + + rows, err := m.db.QueryxContext(queryCtx, query, args...) + if err != nil { + return res, errors.Wrap(err, cannotExecute) + } + defer rows.Close() + + for rows.Next() { + err = rows.Scan( + &res.ServiceName, + &res.Database, + &res.Schema, + &res.Username, + &res.ReplicationSet, + &res.Cluster, + &res.ServiceType, + &res.ServiceId, + &res.Environment, + &res.NodeId, + &res.NodeName, + &res.NodeType) + if err != nil { + return res, errors.Wrap(err, "failed to scan query") + } else { + return res, nil + } + } + + return res, errors.New("query_id doesnt exists") +} diff --git a/qan-api2/services/analytics/object_details.go b/qan-api2/services/analytics/object_details.go index cc788cf522..9aac614264 100644 --- a/qan-api2/services/analytics/object_details.go +++ b/qan-api2/services/analytics/object_details.go @@ -22,6 +22,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "google.golang.org/protobuf/types/known/wrapperspb" qanpb "github.com/percona/pmm/api/qanpb" @@ -70,11 +71,12 @@ func (s *Service) GetMetrics(ctx context.Context, in *qanpb.MetricsRequest) (*qa labels, in.Totals) if err != nil { - return nil, fmt.Errorf("error in quering metrics:%v", err) + return nil, fmt.Errorf("error in quering metrics:%w", err) } if len(metricsList) < 2 { - return nil, fmt.Errorf("metrics not found for filter: %s and group: %s in given time range", in.FilterBy, in.GroupBy) + logrus.Debugf("metrics not found for filter: %s and group: %s in given time range", in.FilterBy, in.GroupBy) + return &qanpb.MetricsReply{}, nil } // Get metrics of one queryid, server etc. 
without totals metrics = metricsList[0] @@ -95,7 +97,8 @@ func (s *Service) GetMetrics(ctx context.Context, in *qanpb.MetricsRequest) (*qa totalLen := len(totalsList) if totalLen < 2 { - return nil, fmt.Errorf("totals not found for filter: %s and group: %s in given time range", in.FilterBy, in.GroupBy) + logrus.Debugf("totals not found for filter: %s and group: %s in given time range", in.FilterBy, in.GroupBy) + return &qanpb.MetricsReply{}, nil } // Get totals for given filter @@ -133,6 +136,18 @@ func (s *Service) GetMetrics(ctx context.Context, in *qanpb.MetricsRequest) (*qa resp.Fingerprint = fp } + metadata, err := s.mm.GetSelectedQueryMetadata(ctx, periodStartFromSec, + periodStartToSec, + in.FilterBy, // filter by queryid, or other. + in.GroupBy, + dimensions, + labels, + in.Totals) + if err != nil { + return resp, err + } + resp.Metadata = metadata + return resp, err } @@ -282,7 +297,7 @@ func (s *Service) GetLabels(ctx context.Context, in *qanpb.ObjectDetailsLabelsRe in.FilterBy, in.GroupBy) if err != nil { - return nil, fmt.Errorf("error in selecting object details labels:%v", err) + return nil, fmt.Errorf("error in selecting object details labels:%w", err) } return resp, nil } @@ -332,7 +347,7 @@ func (s *Service) GetHistogram(ctx context.Context, in *qanpb.HistogramRequest) labels, in.Queryid) if err != nil { - return nil, fmt.Errorf("error in selecting histogram:%v", err) + return nil, fmt.Errorf("error in selecting histogram:%w", err) } return resp, nil @@ -345,7 +360,7 @@ func (s *Service) QueryExists(ctx context.Context, in *qanpb.QueryExistsRequest) in.Serviceid, in.Query) if err != nil { - return nil, fmt.Errorf("error in checking query:%v", err) + return nil, fmt.Errorf("error in checking query:%w", err) } return wrapperspb.Bool(resp), nil @@ -358,7 +373,7 @@ func (s *Service) ExplainFingerprintByQueryID(ctx context.Context, in *qanpb.Exp in.Serviceid, in.QueryId) if err != nil { - return nil, fmt.Errorf("error in checking query:%v", err) + 
return nil, fmt.Errorf("error in checking query:%w", err) } return res, nil diff --git a/qan-api2/services/analytics/object_details_test.go b/qan-api2/services/analytics/object_details_test.go index 037893e116..d826295b8a 100644 --- a/qan-api2/services/analytics/object_details_test.go +++ b/qan-api2/services/analytics/object_details_test.go @@ -177,7 +177,7 @@ func TestService_GetMetricsError(t *testing.T) { FilterBy: "unexist", }, nil, - true, + false, }, { "no_period_start_from", diff --git a/qan-api2/services/analytics/profile.go b/qan-api2/services/analytics/profile.go index 186caed381..075056ebfa 100644 --- a/qan-api2/services/analytics/profile.go +++ b/qan-api2/services/analytics/profile.go @@ -134,12 +134,13 @@ func (s *Service) GetReport(ctx context.Context, in *qanpb.ReportRequest) (*qanp } total := results[0] - resp.TotalRows = uint32(total["total_rows"].(uint64)) + resp.TotalRows = uint32(total["total_rows"].(uint64)) //nolint:forcetypeassert resp.Offset = in.Offset resp.Limit = in.Limit for i, res := range results { numQueries := interfaceToFloat32(res["num_queries"]) + //nolint:forcetypeassert row := &qanpb.Row{ Rank: uint32(i) + in.Offset, Dimension: res["dimension"].(string), @@ -233,7 +234,8 @@ func makeStats(metricNameRoot string, total, res models.M, numQueries float32, p } // getOrderBy creates an order by string to use in query and column name to check if it in select column list. 
-func getOrderBy(reqOrder, defaultOrder string) (queryOrder string, orderCol string) { +func getOrderBy(reqOrder, defaultOrder string) (string, string) { + var queryOrder, orderCol string direction := "ASC" if strings.HasPrefix(reqOrder, "-") { reqOrder = strings.TrimPrefix(reqOrder, "-") diff --git a/qan-api2/test_data/GetMetrics_group_by_queryid.json b/qan-api2/test_data/GetMetrics_group_by_queryid.json index 1c0984c5de..d4e86c01e2 100644 --- a/qan-api2/test_data/GetMetrics_group_by_queryid.json +++ b/qan-api2/test_data/GetMetrics_group_by_queryid.json @@ -3433,5 +3433,18 @@ } }, - "fingerprint": "select @@global.slow_query_log_file" + "fingerprint": "select @@global.slow_query_log_file", + "metadata": { + "serviceName": "server0", + "schema": "schema29", + "username": "user8", + "replicationSet": "replication_set1", + "cluster": "cluster1", + "serviceType": "service_type1", + "serviceId": "service_id1", + "environment": "environment1", + "nodeId": "node_id1", + "nodeName": "node_name1", + "nodeType": "node_type1" + } } \ No newline at end of file diff --git a/qan-api2/test_data/GetMetrics_sparklines_90_points.json b/qan-api2/test_data/GetMetrics_sparklines_90_points.json index d32563e398..4b69df0d83 100644 --- a/qan-api2/test_data/GetMetrics_sparklines_90_points.json +++ b/qan-api2/test_data/GetMetrics_sparklines_90_points.json @@ -1435,5 +1435,18 @@ } }, - "fingerprint": "select @@global.slow_query_log_file" + "fingerprint": "select @@global.slow_query_log_file", + "metadata": { + "serviceName": "server2", + "schema": "schema81", + "username": "user38", + "replicationSet": "replication_set1", + "cluster": "cluster1", + "serviceType": "service_type1", + "serviceId": "service_id1", + "environment": "environment1", + "nodeId": "node_id1", + "nodeName": "node_name1", + "nodeType": "node_type1" + } } \ No newline at end of file diff --git a/qan-api2/test_data/GetMetrics_total.json b/qan-api2/test_data/GetMetrics_total.json index 9833425bae..32978b0f56 100644 --- 
a/qan-api2/test_data/GetMetrics_total.json +++ b/qan-api2/test_data/GetMetrics_total.json @@ -6696,5 +6696,18 @@ "wal_records": { } + }, + "metadata": { + "serviceName": "server0", + "schema": "schema12", + "username": "user19", + "replicationSet": "replication_set1", + "cluster": "cluster1", + "serviceType": "service_type1", + "serviceId": "service_id1", + "environment": "environment1", + "nodeId": "node_id1", + "nodeName": "node_name1", + "nodeType": "node_type1" } } \ No newline at end of file diff --git a/qan-api2/utils/logger/logger.go b/qan-api2/utils/logger/logger.go index a87b9da443..27bf50438d 100644 --- a/qan-api2/utils/logger/logger.go +++ b/qan-api2/utils/logger/logger.go @@ -33,7 +33,7 @@ func Get(ctx context.Context) *logrus.Entry { if v == nil { panic("context logger not set") } - return v.(*logrus.Entry) + return v.(*logrus.Entry) //nolint:forcetypeassert } // Set returns derived context with set logrus entry with given request ID. diff --git a/tools/go.mod b/tools/go.mod index 24445be7c8..0994dd06a8 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -8,9 +8,9 @@ require ( github.com/BurntSushi/go-sumtype v0.0.0-20190304192233-fcb4a6205bdc github.com/Percona-Lab/swagger-order v0.0.0-20191002141859-166b3973d026 github.com/apache/skywalking-eyes v0.4.0 - github.com/bufbuild/buf v1.17.0 + github.com/bufbuild/buf v1.19.0 github.com/daixiang0/gci v0.10.1 - github.com/envoyproxy/protoc-gen-validate v0.10.1 + github.com/envoyproxy/protoc-gen-validate v1.0.1 github.com/go-delve/delve v1.20.2 github.com/go-openapi/runtime v0.25.0 github.com/go-openapi/spec v0.20.4 @@ -19,10 +19,10 @@ require ( github.com/jstemmer/go-junit-report v1.0.0 github.com/quasilyte/go-consistent v0.0.0-20200404105227-766526bf1e96 github.com/reviewdog/reviewdog v0.14.1 - github.com/vburenin/ifacemaker v1.2.0 + github.com/vburenin/ifacemaker v1.2.1 github.com/vektra/mockery v1.1.2 golang.org/x/perf v0.0.0-20211012211434-03971e389cd3 - golang.org/x/tools v0.8.0 + golang.org/x/tools v0.9.1 
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.30.0 gopkg.in/reform.v1 v1.5.1 @@ -30,30 +30,31 @@ require ( ) require ( - cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/datastore v1.10.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.1.1 // indirect github.com/Masterminds/sprig/v3 v3.2.2 // indirect - github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/bmatcuk/doublestar/v2 v2.0.4 // indirect github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 // indirect - github.com/bufbuild/connect-go v1.6.0 // indirect + github.com/bufbuild/connect-go v1.7.0 // indirect github.com/bufbuild/protocompile v0.5.1 // indirect github.com/cilium/ebpf v0.7.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/cosiner/argv v0.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/denisenkom/go-mssqldb v0.9.0 // indirect github.com/derekparker/trie v0.0.0-20221213183930-4c74548207f4 // indirect - github.com/docker/cli v23.0.2+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v23.0.3+incompatible // indirect + github.com/docker/cli v23.0.5+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v23.0.6+incompatible // indirect 
github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -80,7 +81,6 @@ require ( github.com/go-toolsmith/astinfo v1.0.0 // indirect github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 // indirect github.com/go-toolsmith/typep v1.0.2 // indirect - github.com/gofrs/flock v0.8.1 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gofrs/uuid/v5 v5.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -90,17 +90,17 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/go-containerregistry v0.14.0 // indirect + github.com/google/go-containerregistry v0.15.1 // indirect github.com/google/go-dap v0.7.0 // indirect github.com/google/go-github/v33 v33.0.0 // indirect github.com/google/go-github/v39 v39.2.0 // indirect github.com/google/go-github/v41 v41.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/licensecheck v0.3.1 // indirect - github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect + github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.0 // indirect @@ -118,12 +118,12 @@ require ( github.com/jessevdk/go-flags v1.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/klauspost/compress v1.16.3 // indirect - 
github.com/klauspost/pgzip v1.2.5 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.6 // indirect - github.com/lyft/protoc-gen-star/v2 v2.0.1 // indirect + github.com/lyft/protoc-gen-star/v2 v2.0.3 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect @@ -135,11 +135,11 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.2 // indirect @@ -151,47 +151,48 @@ require ( github.com/reviewdog/go-bitbucket v0.0.0-20201024094602-708c3f6a7de0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/rs/cors v1.8.3 // indirect + github.com/rs/cors v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/afero v1.9.2 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.12.0 // indirect 
github.com/subosito/gotenv v1.4.0 // indirect - github.com/tetratelabs/wazero v1.0.1 // indirect + github.com/tetratelabs/wazero v1.1.0 // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect + github.com/vbatts/tar-split v0.11.3 // indirect github.com/vvakame/sdlog v0.0.0-20200409072131-7c0d359efddc // indirect github.com/xanzy/go-gitlab v0.63.0 // indirect go.mongodb.org/mongo-driver v1.9.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/sdk v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect + go.opentelemetry.io/otel v1.15.1 // indirect + go.opentelemetry.io/otel/sdk v1.15.1 // indirect + go.opentelemetry.io/otel/trace v1.15.1 // indirect go.starlark.net v0.0.0-20220816155156-cfacd8902214 // indirect - go.uber.org/atomic v1.10.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4 // indirect golang.org/x/build v0.0.0-20200616162219-07bebbe343e9 // indirect - golang.org/x/crypto v0.7.0 // indirect + golang.org/x/crypto v0.9.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.10.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/term v0.7.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sync v0.2.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.108.0 // indirect + google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - 
google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 // indirect - google.golang.org/grpc v1.53.0 // indirect + google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 // indirect + google.golang.org/grpc v1.54.0 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/tools/go.sum b/tools/go.sum index 25cc64e54f..04f88a984a 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -18,16 +18,16 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute 
v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -35,7 +35,7 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1 cloud.google.com/go/datastore v1.10.0 h1:4siQRf4zTiAVt/oeH4GureGkApgb2vtPQAtOmhpqQwE= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -54,6 +54,7 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6 github.com/BurntSushi/go-sumtype v0.0.0-20190304192233-fcb4a6205bdc h1:nvTP+jmloR0+J4YQur/rLRdLcGVEU4SquDgH+Bo7gBY= github.com/BurntSushi/go-sumtype v0.0.0-20190304192233-fcb4a6205bdc/go.mod h1:7yTWMMG2vOm4ABVciEt4EgNVP7fxwtcKIb/EuiLiKqY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= github.com/Masterminds/goutils v1.1.1 
h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -62,8 +63,8 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Percona-Lab/spec v0.20.5-percona h1:ViCJVq52QIZxpP8/Nv4/nIed+WnqUirNjPtXvHhset4= @@ -90,7 +91,7 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.30.15/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.3 h1:g+rSsSaAzhHJYcIQE78hJ3AhyjjtQvleKDjlhdBnIhc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -102,10 +103,10 @@ 
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4k github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0= github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8= github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= -github.com/bufbuild/buf v1.17.0 h1:HqfVS5v81r1Q+vWm91V1uqdB77vO48uSjU1LAYC2DyE= -github.com/bufbuild/buf v1.17.0/go.mod h1:V//7h3wKBA+VVE2cv8CUox57dt9Y6JQ6tCM6cF2ptF8= -github.com/bufbuild/connect-go v1.6.0 h1:OCEB8JuEuvcY5lEKZCQE95CUscqkDtLnQceNhDgi92k= -github.com/bufbuild/connect-go v1.6.0/go.mod h1:GmMJYR6orFqD0Y6ZgX8pwQ8j9baizDrIQMm1/a6LnHk= +github.com/bufbuild/buf v1.19.0 h1:8c/XL39hO2hGgKWgUnRT3bCc8KvMa+V1jpoWWdN2bsw= +github.com/bufbuild/buf v1.19.0/go.mod h1:KiNA2H6TdwkCYS7ZUQIpn4vKq/d7IjUprkgCK9Ik4hU= +github.com/bufbuild/connect-go v1.7.0 h1:MGp82v7SCza+3RhsVhV7aMikwxvI3ZfD72YiGt8FYJo= +github.com/bufbuild/connect-go v1.7.0/go.mod h1:GmMJYR6orFqD0Y6ZgX8pwQ8j9baizDrIQMm1/a6LnHk= github.com/bufbuild/protocompile v0.5.1 h1:mixz5lJX4Hiz4FpqFREJHIXLfaLBntfaJv1h+/jS+Qg= github.com/bufbuild/protocompile v0.5.1/go.mod h1:G5iLmavmF4NsYtpZFvE3B/zFch2GIY8+wjsYLR/lc40= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -122,6 +123,7 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd 
v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -147,12 +149,12 @@ github.com/derekparker/trie v0.0.0-20221213183930-4c74548207f4 h1:atN94qKNhLpy+9 github.com/derekparker/trie v0.0.0-20221213183930-4c74548207f4/go.mod h1:C7Es+DLenIpPc9J6IYw4jrK0h7S9bKj4DNl8+KxGEXU= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/cli v23.0.2+incompatible h1:Yj4wkrNtyCNLCMobKDYzEUIsbtMbfAulkHMH75/ecik= -github.com/docker/cli v23.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.3+incompatible h1:9GhVsShNWz1hO//9BNg/dpMnZW25KydO4wtVxWAIbho= -github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= +github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU= +github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod 
h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -165,8 +167,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= @@ -277,7 +279,6 @@ github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGt github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -348,8 
+349,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= -github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= +github.com/google/go-containerregistry v0.15.1 h1:RsJ9NbfxYWF8Wl4VmvkpN3zYATwuvlPq2j20zmcs63E= +github.com/google/go-containerregistry v0.15.1/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= github.com/google/go-dap v0.7.0 h1:088PdKBUkxAxrXrnY8FREUJXpS6Y6jhAyZIuJv3OGOM= github.com/google/go-dap v0.7.0/go.mod h1:5q8aYQFnHOAZEMP+6vmq25HKYAEwE+LF5yh7JKrrhSQ= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -378,21 +379,21 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de h1:6bMcLOeKoNo0+mTOb1ee3McF6CCKGixjLR3EDQY1Jik= +github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= @@ -452,7 +453,6 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH 
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= @@ -462,7 +462,6 @@ github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGk github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 h1:2uT3aivO7NVpUPGcQX7RbHijHMyWix/yCnIrCWc+5co= github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v1.4.1-0.20181029123624-5de817a9aa20/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= @@ -485,10 +484,10 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY= -github.com/klauspost/compress v1.16.3/go.mod 
h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -507,8 +506,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lyft/protoc-gen-star/v2 v2.0.1 h1:keaAo8hRuAT0O3DfJ/wM3rufbAjGeJ1lAtWZHDjKGB0= -github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/lyft/protoc-gen-star/v2 v2.0.3 h1:/3+/2sWyXeMLzKd1bX+ixWKgEMsULrIivpDsuaF441o= +github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -553,8 +552,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -566,8 +565,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= +github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= 
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -619,8 +618,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= -github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= +github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -651,8 +650,8 @@ github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= 
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -682,16 +681,18 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69 github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tetratelabs/wazero v1.0.1 h1:xyWBoGyMjYekG3mEQ/W7xm9E05S89kJ/at696d/9yuc= -github.com/tetratelabs/wazero v1.0.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= +github.com/tetratelabs/wazero v1.1.0 h1:EByoAhC+QcYpwSZJSs/aV0uokxPwBgKxfiokSUwAknQ= +github.com/tetratelabs/wazero v1.1.0/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= -github.com/vburenin/ifacemaker v1.2.0 h1:jREjCJ8RgTZuH5EYWB0/1ZHdTpJVqhMBU87XIUeX+2I= -github.com/vburenin/ifacemaker v1.2.0/go.mod h1:oZwuhpbmYD8SjjofPhscHVmYxNtRLdczDCslWrb/q2w= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= +github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= +github.com/vburenin/ifacemaker v1.2.1 h1:3Vq8B/bfBgjWTkv+jDg4dVL1KHt3k1K4lO7XRxYA2sk= +github.com/vburenin/ifacemaker v1.2.1/go.mod 
h1:5WqrzX2aD7/hi+okBjcaEQJMg4lDGrpuEX3B8L4Wgrs= github.com/vektra/mockery v1.1.2 h1:uc0Yn67rJpjt8U/mAZimdCKn9AeA97BOkjpmtBSlfP4= github.com/vektra/mockery v1.1.2/go.mod h1:VcfZjKaFOPO+MpN4ZvwPjs4c48lkq1o3Ym8yHZJu0jU= github.com/vvakame/sdlog v0.0.0-20200409072131-7c0d359efddc h1:El7LEavRpa49dYFE9ezO8aQxQn5E7u7eQkFsaXsoQAY= @@ -722,17 +723,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= +go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= +go.opentelemetry.io/otel/sdk v1.15.1 h1:5FKR+skgpzvhPQHIEfcwMYjCBr14LWzs3uSqKiQzETI= +go.opentelemetry.io/otel/sdk v1.15.1/go.mod h1:8rVtxQfrbmbHKfqzpQkT5EzZMcbMBwTzNAggbEAM0KA= +go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= +go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= go.starlark.net v0.0.0-20220816155156-cfacd8902214 h1:MqijAN3S61c7KWasOk+zIqIjHQPN6WUra/X3+YAkQxQ= go.starlark.net v0.0.0-20220816155156-cfacd8902214/go.mod h1:VZcBMdr3cT3PnBoWunTabuSEXwVAH+ZJ5zxfs3AdASk= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -762,8 +763,8 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -845,8 +846,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220517181318-183a9ca12b87/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -858,8 +859,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/perf v0.0.0-20211012211434-03971e389cd3 h1:TxpziJvKtFH7T75kH/tX3QELShnXGyWX1iVgw8hU9EY= golang.org/x/perf v0.0.0-20211012211434-03971e389cd3/go.mod h1:KRSrLY7jerMEa0Ih7gBheQ3FYDiSx6liMnniX1o3j2g= @@ -876,8 +877,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -939,14 +940,15 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1026,8 +1028,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1055,8 +1057,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= 
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.108.0 h1:WVBc/faN0DkKtR43Q/7+tPny9ZoLZdIiAyG5Q9vFClg= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1101,8 +1103,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 h1:znp6mq/drrY+6khTAlJUDNFFcDGV2ENLYKpMq8SyCds= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 h1:VmCWItVXcKboEMCwZaWge+1JLiTCQSngZeINF+wzO+g= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/grpc v0.0.0-20170208002647-2a6bf6142e96/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1120,8 +1122,8 @@ 
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/update/.devcontainer/install-dev-tools.sh b/update/.devcontainer/install-dev-tools.sh index fc3296bcb0..8e5b43d1f9 100755 --- a/update/.devcontainer/install-dev-tools.sh +++ b/update/.devcontainer/install-dev-tools.sh @@ -8,7 +8,7 @@ set -o errexit set -o xtrace # download (in the background) the same verison as used by PMM build process -curl -sS https://dl.google.com/go/go1.19.4.linux-amd64.tar.gz -o /tmp/golang.tar.gz & +curl -sS https://dl.google.com/go/go1.20.3.linux-amd64.tar.gz -o /tmp/golang.tar.gz & # to install man pages sed -i '/nodocs/d' /etc/yum.conf diff --git a/update/ansible/playbook/tasks/create-lvm.yml b/update/ansible/playbook/tasks/create-lvm.yml index 7c446cef72..93a072b38f 100644 --- a/update/ansible/playbook/tasks/create-lvm.yml +++ b/update/ansible/playbook/tasks/create-lvm.yml @@ -1,6 +1,6 @@ - hosts: localhost - become: yes - gather_facts: yes + become: true + gather_facts: true tasks: - name: Stop supervisord service: diff --git 
a/update/ansible/playbook/tasks/files/cloud.cfg b/update/ansible/playbook/tasks/files/cloud.cfg new file mode 100644 index 0000000000..97aaf5084c --- /dev/null +++ b/update/ansible/playbook/tasks/files/cloud.cfg @@ -0,0 +1,89 @@ +# NOTE: this is a sample cloud.cfg file that is retrieved when building PMM on DigitalOcean. +# It shall be used as a reference only. It is not used by PMM. + +# The top level settings are used as module +# and system configuration. +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + + +# If this is set, 'root' will not be able to ssh in and they +# will get a message to login instead as the default $user +disable_root: true + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +# If you use datasource_list array, keep array items in a single line. +# If you use multi line array, ds-identify script won't read array items. 
+# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) + + + + +# The modules that run in the 'init' stage +cloud_init_modules: + - migrator + - seed_random + - bootcmd + - write-files + - growpart + - resizefs + - disk_setup + - mounts + - set_hostname + - update_hostname + - update_etc_hosts + - ca-certs + - rsyslog + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: + - ssh-import-id + - keyboard + - locale + - set-passwords + - ntp + - timezone + - disable-ec2-metadata + - runcmd + +# The modules that run in the 'final' stage +cloud_final_modules: + - package-update-upgrade-install + - write-files-deferred + - puppet + - chef + - mcollective + - salt-minion + - reset_rmc + - refresh_rmc_and_interface + - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - install-hotplug + - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: + # This will affect which distro class gets used + # Unknown/fallback distro. 
+ distro: ubuntu \ No newline at end of file diff --git a/update/ansible/playbook/tasks/init.yml b/update/ansible/playbook/tasks/init.yml index 1fb2c8d3c4..828b8055c6 100644 --- a/update/ansible/playbook/tasks/init.yml +++ b/update/ansible/playbook/tasks/init.yml @@ -1,8 +1,8 @@ --- # This playbook contains tasks executed during initialization PMM Server - hosts: localhost - become: yes - gather_facts: yes + become: true + gather_facts: true tasks: - name: Run initializaion role include_role: diff --git a/update/ansible/playbook/tasks/roles/clickhouse/files/config.xml b/update/ansible/playbook/tasks/roles/clickhouse/files/config.xml index 07e64a3f17..4998380847 100644 --- a/update/ansible/playbook/tasks/roles/clickhouse/files/config.xml +++ b/update/ansible/playbook/tasks/roles/clickhouse/files/config.xml @@ -808,17 +808,17 @@ asynchronous_metrics - send data from table system.asynchronous_metrics status_info - send data from different component from CH, ex: Dictionaries status --> - true true - --> diff --git a/update/ansible/playbook/tasks/roles/clickhouse/tasks/main.yml b/update/ansible/playbook/tasks/roles/clickhouse/tasks/main.yml index 3dbfd911e4..61ed5c7508 100644 --- a/update/ansible/playbook/tasks/roles/clickhouse/tasks/main.yml +++ b/update/ansible/playbook/tasks/roles/clickhouse/tasks/main.yml @@ -1,10 +1,24 @@ --- -- name: Stop clickhouse before update +- name: Stop and remove clickhouse before update | EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl {{ item }} clickhouse changed_when: True loop: - - "stop" - - "remove" + - stop + - remove + +- name: Find supervisord's socket + stat: + path: /var/run/supervisor/supervisor.sock + register: supervisord_socket + +- name: Stop and remove clickhouse before update | EL9 + when: supervisord_socket.stat.exists and (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + 
command: /usr/local/bin/supervisorctl {{ item }} clickhouse + become: true + loop: + - stop + - remove - name: Remove old clickhouse packages yum: @@ -21,6 +35,7 @@ name: clickhouse-server state: stopped enabled: no + ignore_errors: true - name: Create clickhouse data directory file: @@ -29,21 +44,40 @@ owner: root group: pmm +# RHEL9 dropped support for SHA1 gpg keys - name: Import clickhouse repo GPG key + when: + - ansible_distribution == "CentOS" + - ansible_distribution_major_version == "7" rpm_key: state: present key: "https://repo.clickhouse.com/CLICKHOUSE-KEY.GPG" -- name: Install clickhouse repo +- name: Install clickhouse repo | EL7 + when: + - ansible_distribution == "CentOS" + - ansible_distribution_major_version == "7" yum_repository: name: clickhouse file: clickhouse description: "Clickhouse repo" - baseurl: "https://packages.clickhouse.com/rpm/stable/" + baseurl: "https://repo.clickhouse.com/rpm/stable/x86_64/" enabled: no gpgcheck: 1 gpgkey: "https://repo.clickhouse.com/CLICKHOUSE-KEY.GPG" +# RHEL9 dropped support for SHA1 gpg keys, so we disable gpgcheck :( +- name: Install clickhouse repo | EL9 + when: + - (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + yum_repository: + name: clickhouse + file: clickhouse + description: "Clickhouse repo" + baseurl: "https://repo.clickhouse.com/rpm/stable/x86_64/" + enabled: no + gpgcheck: 0 + - name: Install clickhouse package yum: name: @@ -52,7 +86,7 @@ - clickhouse-common-static-{{ clickhouse_version}} state: installed enablerepo: clickhouse - ignore_errors: '{{ ansible_check_mode }}' # We don't have clickhouse repo when we run ansible with --check + ignore_errors: "{{ ansible_check_mode }}" # We don't have clickhouse repo when we run ansible with --check - name: Copy clickhouse config to image copy: @@ -60,8 +94,8 @@ dest: /etc/clickhouse-server/config.xml mode: 0600 -# We need to remove capabilities because we run PMM in 
unprivileged container and we can't use it -# But we run clickhouse under root user +# We need to remove capabilities because we run PMM in an unprivileged container +# But we run clickhouse as root user - name: Remove cap_ipc_lock from clickhouse binary capabilities: path: /usr/bin/clickhouse @@ -83,6 +117,18 @@ owner: root group: root -- name: Start clickhouse +- name: Start clickhouse EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + supervisorctl: + name: clickhouse + state: "{{ item }}" + supervisorctl_path: /usr/local/bin/supervisorctl + become: true + loop: + - present + - started + +- name: Start clickhouse EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl add clickhouse changed_when: True diff --git a/update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml b/update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml index 0f8f11e49f..cda0d9ed08 100644 --- a/update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml +++ b/update/ansible/playbook/tasks/roles/dashboards_upgrade/tasks/main.yml @@ -6,30 +6,30 @@ - name: Copy plugins from old plugin directory block: - - name: Find custom plugin in old plugin dir - find: - paths: /var/lib/grafana/plugins - recurse: no - file_type: directory - excludes: - - '*-???????' - - pmm-app - register: custom_plugins - - - name: Synchronization plugins (old version before 2.22.0) - synchronize: - src: "{{ item['path'] }}" - dest: "/srv/grafana/plugins/" - loop: "{{ custom_plugins['files'] }}" - - - name: Find custom plugin in old plugin dir - find: - paths: /var/lib/grafana/plugins - recurse: no - file_type: directory - excludes: - - '*-???????' - - pmm-app + - name: Find custom plugins in old plugin dir + find: + paths: /var/lib/grafana/plugins + recurse: no + file_type: directory + excludes: + - "*-???????" 
+ - pmm-app + register: custom_plugins + + - name: Synchronization plugins (for versions before 2.22.0) + synchronize: + src: "{{ item['path'] }}" + dest: "/srv/grafana/plugins/" + loop: "{{ custom_plugins['files'] }}" + + - name: Find custom plugin in old plugin dir + find: + paths: /var/lib/grafana/plugins + recurse: no + file_type: directory + excludes: + - "*-???????" + - pmm-app when: old_plugin_dir_exist.stat.exists - name: Get plugin list @@ -57,38 +57,67 @@ - name: Remove old dashboards from SQLite block: - - name: Restart grafana before delete data - supervisorctl: - name: grafana - state: stopped - ignore_errors: True # TODO we have race condition here. We generate grafana supervisor config in pmm-managed and it doesn't exist on this stage - - - name: Remove old DBaaS dashboard (Before provisioning times) - command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'DBaaS' AND slug = 'dbaas';" - changed_when: True - - - name: Remove old PMM Inventory (Before provisioning times) - command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Inventory' AND slug = 'pmm-inventory';" - changed_when: True - - - name: Remove old PMM Add Instance dashboard (Before provisioning times) - command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Add Instance' AND slug = 'pmm-add-instance';" - changed_when: True - - - name: Remove old PMM Database Checks dashboard (Before provisioning times) - command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Database Checks' AND slug = 'pmm-database-checks';" - changed_when: True - - - name: Remove old PMM Settings dashboard (Before provisioning times) - command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Settings' AND slug = 'pmm-settings';" - changed_when: True + - name: Restart grafana before 
deleting data EL7 + supervisorctl: + name: grafana + state: stopped + supervisorctl_path: supervisorctl + become: true + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + ignore_errors: true + + - name: Restart grafana before deleting data EL9 + supervisorctl: + name: grafana + state: stopped + supervisorctl_path: /usr/local/bin/supervisorctl + become: true + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + ignore_errors: true + # FIXME: we have a race condition here. We generate grafana supervisor config in pmm-managed and it doesn't exist at this stage + + - name: Remove old DBaaS dashboard (Before provisioning times) + command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'DBaaS' AND slug = 'dbaas';" + changed_when: true + + - name: Remove old PMM Inventory (Before provisioning times) + command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Inventory' AND slug = 'pmm-inventory';" + changed_when: true + + - name: Remove old PMM Add Instance dashboard (Before provisioning times) + command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Add Instance' AND slug = 'pmm-add-instance';" + changed_when: true + + - name: Remove old PMM Database Checks dashboard (Before provisioning times) + command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Database Checks' AND slug = 'pmm-database-checks';" + changed_when: true + + - name: Remove old PMM Settings dashboard (Before provisioning times) + command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "DELETE FROM dashboard WHERE title = 'PMM Settings' AND slug = 'pmm-settings';" + changed_when: true when: sqlite_grafana.stat.exists -- name: Restart grafana with new plugins +- name: Restart grafana with new 
plugins EL7 + supervisorctl: + name: grafana + state: restarted + supervisorctl_path: supervisorctl + become: true + ignore_errors: true + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + # TODO: fix the race condition. + # We generate grafana supervisor config in pmm-managed and it may not exist at this stage + +- name: Restart grafana with new plugins EL9 supervisorctl: name: grafana state: restarted - ignore_errors: True # TODO we have race condition here. We generate grafana supervisor config in pmm-managed and it doesn't exist on this stage + supervisorctl_path: /usr/local/bin/supervisorctl + become: true + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + ignore_errors: true + # TODO: fix the race condition. + # We generate grafana supervisor config in pmm-managed and it may not exist at this stage - name: Copy file with image version copy: diff --git a/update/ansible/playbook/tasks/roles/grafana/files/grafana.ini b/update/ansible/playbook/tasks/roles/grafana/files/grafana.ini index 5b5d16fd32..2d71a9fac4 100644 --- a/update/ansible/playbook/tasks/roles/grafana/files/grafana.ini +++ b/update/ansible/playbook/tasks/roles/grafana/files/grafana.ini @@ -1,6 +1,18 @@ ##################### Grafana Configuration ##################### # Only changed settings. You can find default settings in /usr/share/grafana/conf/defaults.ini +#################################### Database #################################### +[database] +# You can configure the database connection by specifying type, host, name, user and password +# as separate properties or as on string using the url properties. + +# Either "mysql", "postgres" or "sqlite3", it's your choice +type = postgres +host = localhost +user = grafana +# If the password contains # or ; you have to wrap it with triple quotes. 
Ex """#password;""" +password = grafana + [paths] # Directory where grafana will automatically scan and look for plugins plugins = /srv/grafana/plugins diff --git a/update/ansible/playbook/tasks/roles/grafana/tasks/main.yml b/update/ansible/playbook/tasks/roles/grafana/tasks/main.yml index cce395aa28..6c53e92edf 100644 --- a/update/ansible/playbook/tasks/roles/grafana/tasks/main.yml +++ b/update/ansible/playbook/tasks/roles/grafana/tasks/main.yml @@ -23,6 +23,42 @@ group: grafana mode: '0444' +- name: Check that the SQLite grafana database exists + stat: + path: /srv/grafana/grafana.db + register: sqlite_grafana + +- name: Temporary change database to SQLite + block: + - name: Remove database options (SQLite is default) + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: type + value: absent + + - name: Remove database host + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: host + state: absent + + - name: Remove database user + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: user + state: absent + + - name: Remove database password + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: password + state: absent + when: sqlite_grafana.stat.exists + - name: Create provisioning directory file: path: "/usr/share/grafana/conf/provisioning/{{ item }}" diff --git a/update/ansible/playbook/tasks/roles/initialization/tasks/main.yml b/update/ansible/playbook/tasks/roles/initialization/tasks/main.yml index 9ace7e4dd3..f84e0a2616 100644 --- a/update/ansible/playbook/tasks/roles/initialization/tasks/main.yml +++ b/update/ansible/playbook/tasks/roles/initialization/tasks/main.yml @@ -1,11 +1,19 @@ --- # This role contains tasks executed during initialization of PMM Server +- name: Determine type of upgrade + set_fact: + ui_upgrade: False + when: ui_upgrade is undefined + # PMM-10858 - In certain environments, including AWS EC2, some of the # EPEL repository mirrors do not respond within 
the time limit defined # by pmm-update which is currently set to 30 seconds. This was causing # supervisord to kill pmm-update-checker - name: Update repository settings + when: + - ansible_distribution == "CentOS" + - ansible_distribution_major_version == "7" command: yum-config-manager --setopt=epel.timeout=1 --save changed_when: True @@ -57,3 +65,35 @@ include_role: name: postgres when: is_postgres_11.stat.exists + +- name: Create grafana database in postgres + postgresql_db: + name: grafana + state: present + +- name: Create grafana user in postgres + postgresql_user: + db: grafana + name: grafana + password: grafana + priv: 'ALL' + expires: infinity + state: present + when: not ansible_check_mode + +- name: Run SQLite -> Postgres only for docker upgrade + block: + - name: Check that the SQLite grafana database exists + stat: + path: /srv/grafana/grafana.db + register: is_database_sqlite + + - name: Migrate Grafana database from SQLite to Postgresql + include_role: + name: sqlite-to-postgres + when: is_database_sqlite.stat.exists + tags: + - skip_ansible_lint # '503 Tasks that run when changed should likely be handlers'. 
+ # We use current_version_file['failed'] because we don't want to run this on creating container + when: not ui_upgrade and current_version_file['failed'] == false + diff --git a/update/ansible/playbook/tasks/roles/nginx/tasks/main.yml b/update/ansible/playbook/tasks/roles/nginx/tasks/main.yml index 371b3e0b76..57d7f95ae6 100644 --- a/update/ansible/playbook/tasks/roles/nginx/tasks/main.yml +++ b/update/ansible/playbook/tasks/roles/nginx/tasks/main.yml @@ -1,6 +1,9 @@ --- # We already have nginx package in epel repo -- name: Add Nginx repository +- name: Add Nginx repository for RHEL7 + when: + - ansible_distribution == 'CentOS' + - ansible_distribution_major_version == '7' yum_repository: name: nginx description: nginx repo @@ -8,6 +11,17 @@ gpgcheck: no enabled: no +- name: Add Nginx repository for RHEL9 + when: + - ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux' + - ansible_distribution_major_version == '9' + yum_repository: + name: nginx + description: nginx repo + baseurl: http://nginx.org/packages/rhel/9/$basearch/ + gpgcheck: no + enabled: no + - name: Create directories for nginx file: path: "{{ item }}" @@ -17,22 +31,22 @@ - /etc/nginx/conf.d/ - /etc/nginx/ssl/ -- name: Install nginx rpm | Install nginx rpm +- name: Install nginx rpm | Install nginx yum: - name: nginx-1.20.1 + name: nginx state: installed - name: Add ssl-related files and scripts copy: src: "ssl/{{ item.src }}" dest: "{{ item.dest }}" - mode: '{{ item.mode }}' + mode: "{{ item.mode }}" loop: - - { src: ca-certs.pem, dest: /etc/nginx/ssl/ca-certs.pem, mode: 'u=rw,g=r,o=r' } - - { src: certificate.conf, dest: /etc/nginx/ssl/certificate.conf, mode: 'u=rw,g=r,o=r' } - - { src: dhparam.pem, dest: /etc/nginx/ssl/dhparam.pem, mode: 'u=rw,g=r,o=r' } - - { src: generate-ssl-certificate, dest: /var/lib/cloud/scripts/per-boot/generate-ssl-certificate, mode: 'u=rwx,g=rx,o=rx' } - - { src: upgrade-certificate, dest: /var/lib/cloud/scripts/per-boot/upgrade-certificate, 
mode: 'u=rwx,g=rx,o=rx' } + - { src: ca-certs.pem, dest: /etc/nginx/ssl/ca-certs.pem, mode: "u=rw,g=r,o=r" } + - { src: certificate.conf, dest: /etc/nginx/ssl/certificate.conf, mode: "u=rw,g=r,o=r" } + - { src: dhparam.pem, dest: /etc/nginx/ssl/dhparam.pem, mode: "u=rw,g=r,o=r" } + - { src: generate-ssl-certificate, dest: /var/lib/cloud/scripts/per-boot/generate-ssl-certificate, mode: "u=rwx,g=rx,o=rx" } + - { src: upgrade-certificate, dest: /var/lib/cloud/scripts/per-boot/upgrade-certificate, mode: "u=rwx,g=rx,o=rx" } - name: NGINX | Copy nginx configs copy: @@ -60,7 +74,7 @@ - name: Check nginx configuration command: nginx -t - changed_when: False + changed_when: false - name: Copy local-rss.xml file copy: @@ -68,10 +82,11 @@ dest: /usr/share/pmm-server/static/ mode: 0644 -- name: Restart nginx - command: supervisorctl {{ item }} nginx - changed_when: True - loop: - - "stop" - - "remove" - - "add" +# - name: Restart nginx +# command: /usr/local/bin/supervisorctl {{ item }} nginx +# become: true +# changed_when: True +# loop: +# - "stop" +# - "remove" +# - "add" diff --git a/update/ansible/playbook/tasks/roles/postgres/tasks/main.yml b/update/ansible/playbook/tasks/roles/postgres/tasks/main.yml index e65a40ec03..0bab71b74d 100644 --- a/update/ansible/playbook/tasks/roles/postgres/tasks/main.yml +++ b/update/ansible/playbook/tasks/roles/postgres/tasks/main.yml @@ -1,36 +1,79 @@ --- # Install Postgres -- name: Add PostgreSQL 14 YUM repository - yum_repository: - name: percona-ppg-14 - description: PostgreSQL YUM repository - x86_64 - baseurl: http://repo.percona.com/ppg-14/yum/release/7/RPMS/x86_64 - gpgcheck: yes - enabled: yes - gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY - -- name: Add PostgreSQL 11 YUM repository - yum_repository: - name: percona-ppg-11 - description: PostgreSQL YUM repository - x86_64 - baseurl: http://repo.percona.com/ppg-11/yum/release/7/RPMS/x86_64 - gpgcheck: yes - enabled: yes - gpgkey: 
file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY - -# we need old postgres binary for upgrade process -- name: Install Postgres - yum: - name: - - percona-postgresql14-server - - percona-postgresql14-contrib - - percona-postgresql14 - - percona-postgresql11-server - - percona-postgresql11-contrib - - percona-postgresql11 - - python-psycopg2 - state: installed - when: not ansible_check_mode +- name: Install Postgres for EL7 + block: + - name: Add PostgreSQL 14 YUM repository for EL7 + yum_repository: + name: percona-ppg-14 + description: PostgreSQL YUM repository - x86_64 + baseurl: http://repo.percona.com/ppg-14/yum/release/7/RPMS/x86_64 + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + + - name: Add PostgreSQL 11 YUM repository for EL7 + yum_repository: + name: percona-ppg-11 + description: PostgreSQL YUM repository - x86_64 + baseurl: http://repo.percona.com/ppg-11/yum/release/7/RPMS/x86_64 + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + + # we need the old postgres binary for the upgrade process + - name: Install Postgres + when: + - not ansible_check_mode + yum: + name: + - percona-postgresql14-server + - percona-postgresql14-contrib + - percona-postgresql14 + - percona-postgresql11-server + - percona-postgresql11-contrib + - percona-postgresql11 + - python-psycopg2 # Python PostgreSQL database adapter§ + state: installed + when: + - ansible_distribution == "CentOS" + - ansible_distribution_major_version == "7" + +- name: Install Postgres for EL9 + block: + - name: Add PostgreSQL 14 YUM repository for EL9 + yum_repository: + name: percona-ppg-14 + description: PostgreSQL YUM repository - x86_64 + baseurl: http://repo.percona.com/ppg-14/yum/release/9/RPMS/x86_64 + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + + - name: Add PostgreSQL 11 YUM repository for EL9 + yum_repository: + name: percona-ppg-11 + description: PostgreSQL YUM repository - 
x86_64 + baseurl: http://repo.percona.com/ppg-11/yum/release/9/RPMS/x86_64 + gpgcheck: yes + enabled: yes + gpgkey: file:///etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY + + # we need the old postgres binary for the upgrade process + - name: Install Postgres + when: + - not ansible_check_mode + dnf: + name: + - percona-postgresql14-server + - percona-postgresql14-contrib + - percona-postgresql14 + - percona-postgresql11-server + - percona-postgresql11-contrib + - percona-postgresql11 + - python-psycopg2 # Python PostgreSQL database adapter§ + state: installed + when: + - (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' - name: Create Postgres log file file: @@ -52,6 +95,8 @@ path: /srv/postgres14 register: is_postgres_14 +# From this line on, the script will only run if pg14 is not installed, +# which means that PMM is about to migrate from pg11 to pg14 - name: Upgrade (or init) postgres block: - name: Create Postgres data dir @@ -60,10 +105,11 @@ state: directory owner: postgres group: postgres + mode: "0700" - name: Initialize Postgres database - command: /usr/pgsql-14/bin/initdb -D /srv/postgres14 - become: yes + command: /usr/pgsql-14/bin/initdb -D /srv/postgres14 --auth=trust + become: true become_user: postgres become_method: su @@ -72,18 +118,34 @@ path: /var/run/supervisor/supervisor.sock register: is_upgrade - - name: Stop pmm-managed and postgres before backup database + - name: Stop pmm-managed and postgres before backup database | EL7 supervisorctl: name: "{{ item }}" state: stopped loop: - pmm-managed - postgresql - when: is_upgrade.stat.exists + become: true + when: + - is_upgrade.stat.exists + - ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + + - name: Stop pmm-managed and postgres before backup database | EL9 + supervisorctl: + name: "{{ item }}" + state: stopped + supervisorctl_path: /usr/local/bin/supervisorctl + loop: + - pmm-managed + - 
postgresql + become: true + when: + - is_upgrade.stat.exists + - ansible_distribution != 'CentOS' and ansible_distribution_major_version == '9' - name: Run Postgres database without supervisor command: /usr/pgsql-11/bin/pg_ctl start -D /srv/postgres - become: yes + become: true become_user: postgres become_method: su when: is_upgrade.stat.exists @@ -97,14 +159,14 @@ - name: Stop Postgres database without supervisor command: /usr/pgsql-11/bin/pg_ctl stop -D /srv/postgres - become: yes + become: true become_user: postgres become_method: su when: is_upgrade.stat.exists - name: Start Postgres 14 database without supervisor - command: /usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14 - become: yes + command: /usr/pgsql-14/bin/pg_ctl start -D /srv/postgres14 -o "-c logging_collector=off" + become: true become_user: postgres become_method: su @@ -138,7 +200,7 @@ - name: Stop Postgres 14 database without supervisor command: /usr/pgsql-14/bin/pg_ctl stop -D /srv/postgres14 - become: yes + become: true become_user: postgres become_method: su @@ -149,28 +211,43 @@ dest: /srv/postgres11 when: is_upgrade.stat.exists - - name: Remove old Postgres direcroty + - name: Remove old Postgres directory file: path: /srv/postgres state: absent when: is_upgrade.stat.exists - name: Reread supervisord configuration - command: supervisorctl reread + command: /usr/local/bin/supervisorctl reread + become: true when: is_upgrade.stat.exists - name: Restart Postgres - command: supervisorctl {{ item }} postgresql + command: /usr/local/bin/supervisorctl {{ item }} postgresql changed_when: True + become: true loop: - stop - remove - add when: is_upgrade.stat.exists - - name: Run pmm-managed again + - name: Run pmm-managed again | EL7 supervisorctl: name: pmm-managed state: started - when: is_upgrade.stat.exists + become: true + when: + - is_upgrade.stat.exists + - ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + + - name: Run pmm-managed again | EL9 + supervisorctl: + 
name: pmm-managed + state: started + supervisorctl_path: /usr/local/bin/supervisorctl + become: true + when: + - is_upgrade.stat.exists + - ansible_distribution != 'CentOS' and ansible_distribution_major_version == '9' when: not is_postgres_14.stat.exists diff --git a/update/ansible/playbook/tasks/roles/sqlite-to-postgres/tasks/main.yml b/update/ansible/playbook/tasks/roles/sqlite-to-postgres/tasks/main.yml new file mode 100644 index 0000000000..3d035a4a5a --- /dev/null +++ b/update/ansible/playbook/tasks/roles/sqlite-to-postgres/tasks/main.yml @@ -0,0 +1,122 @@ +--- +- name: Create Grafana backup dir + file: + path: "/srv/backup/grafana" + state: directory + owner: grafana + group: grafana + mode: '0700' + +- name: Stop grafana before upgrade + supervisorctl: + name: 'grafana' + state: stopped + +- name: Create backup for SQLite Grafana database + copy: + src: /srv/grafana/grafana.db + dest: "/srv/backup/grafana/grafana.db" + owner: grafana + group: grafana + mode: '0700' + +- name: Remove all ` symbols in grafana dashboard description + command: sqlite3 /srv/grafana/grafana.db -cmd ".timeout 60000" "UPDATE dashboard SET data = REPLACE(data, '`', '');" + changed_when: True + +- name: Disable provisioning before change database + ini_file: + dest: /etc/grafana/grafana.ini + section: paths + option: provisioning + value: conf/provisioning_disable + +- name: Switch to postgres + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: type + value: postgres + +- name: Set database host + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: host + value: localhost + +- name: Set database user + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: user + value: grafana + +- name: Set database password + ini_file: + dest: /etc/grafana/grafana.ini + section: database + option: password + value: grafana + +- name: Start grafana again + supervisorctl: + name: grafana + state: restarted + ignore_errors: yes + +- name: 
Check if initial data were created + postgresql_query: + db: grafana + query: SELECT 1 FROM org WHERE id=1 + retries: 3 + delay: 3 + register: psql_result + until: psql_result.rowcount == 1 + when: not ansible_check_mode + +- name: Wait for grafana database initialization + pause: + seconds: 10 + +- name: Stop grafana before upgrade + supervisorctl: + name: grafana + state: stopped + +- name: Remove default admin user + postgresql_query: + db: grafana + query: DELETE FROM public.user WHERE login='admin' + when: not ansible_check_mode + +- name: Run grafana migrator + command: grafana-db-migrator --change-char-to-text --reset-home-dashboard /srv/grafana/grafana.db "postgres://grafana:grafana@localhost:5432/grafana?sslmode=disable" + register: migrator_output + changed_when: "'All done' in migrator_output.stdout" + +- name: Enable provisioning after change database + ini_file: + dest: /etc/grafana/grafana.ini + section: paths + option: provisioning + value: conf/provisioning + +- name: Start grafana again + supervisorctl: + name: grafana + state: restarted + +- name: Wait for grafana initialization + pause: + seconds: 5 + +- name: Fix database/folder relationship + command: grafana-db-migrator --fix-folders-id /srv/grafana/grafana.db "postgres://grafana:grafana@localhost:5432/grafana?sslmode=disable" + register: migrator_output + changed_when: "'All done' in migrator_output.stdout" + +- name: Remove SQLite Grafana database + file: + path: /srv/grafana/grafana.db + state: absent diff --git a/update/ansible/playbook/tasks/update.yml b/update/ansible/playbook/tasks/update.yml index 7f54b72fc1..39ca84dca7 100644 --- a/update/ansible/playbook/tasks/update.yml +++ b/update/ansible/playbook/tasks/update.yml @@ -1,8 +1,13 @@ --- # This playbook contains tasks executed during PMM Server update. 
- hosts: localhost - become: yes - gather_facts: yes + become: true + remote_user: root + gather_facts: true + + environment: + PATH: /usr/local/bin:{{ ansible_env.PATH }} + vars: pmm_packages: - percona-victoriametrics @@ -15,6 +20,7 @@ - pmm2-client - pmm-dump - vmproxy + - grafana-db-migrator pre_tasks: - name: detect /srv/pmm-distribution stat: @@ -46,7 +52,7 @@ - percona-grafana state: latest - - name: Create supervisor dir + - name: Create supervisord dir file: path: /etc/supervisord.d/ state: directory @@ -115,13 +121,24 @@ path: /var/run/supervisor/supervisor.sock register: is_supervisor_running - - name: Supervisord start | Start supervisord for docker - when: is_docker and not is_supervisor_running.stat.exists + - name: Supervisord start EL7 | Start supervisord for docker + when: is_docker and not is_supervisor_running.stat.exists and ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' shell: supervisord -c /etc/supervisord.conf & + - name: Supervisord start EL9 | Start supervisord for docker + when: is_docker and not is_supervisor_running.stat.exists and (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + shell: /usr/local/bin/supervisord -c /etc/supervisord.conf & + + - name: Wait until postgres port is present before continuing + wait_for: + host: localhost + port: 5432 + - name: Run initialization playbook include_role: name: initialization + vars: + ui_upgrade: True - name: Enable crond service when: not is_docker @@ -141,11 +158,19 @@ # See https://github.com/Supervisor/supervisor/issues/1264 for explanation # why we do reread + stop/remove/add instead of using supervisorctl Ansible module. 
- - name: Reread supervisord configuration + - name: Reread supervisord configuration EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl reread register: reread_result changed_when: "'No config updates to processes' not in reread_result.stdout" + - name: Reread supervisord configuration EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + command: /usr/local/bin/supervisorctl reread + become: true + register: reread_result + changed_when: "'No config updates to processes' not in reread_result.stdout" + - name: Check reread results debug: var=reread_result.stdout_lines @@ -178,9 +203,18 @@ state: latest # restart pmm-managed first as it may update supervisord configuration on start - - name: Restart pmm-managed + - name: Restart pmm-managed EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl {{ item }} pmm-managed - changed_when: True + become: true + changed_when: true + with_items: ["stop", "remove", "add"] + + - name: Restart pmm-managed EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + command: /usr/local/bin/supervisorctl {{ item }} pmm-managed + become: true + changed_when: true with_items: ["stop", "remove", "add"] # give pmm-managed time to update supervisord configuration, @@ -196,12 +230,15 @@ exclude: - nginx* - - name: Updating vulnerable packages + - name: Updating only select packages yum: name: "{{ item }}" state: latest loop: - nss + - tzdata + - libssh2 + - sshpass - name: Install nginx include_role: @@ -220,7 +257,7 @@ enabled: no when: not is_docker - - name: Remove old packages + - name: Remove old or redundant packages yum: state: absent name: @@ -266,19 +303,42 @@ - name: Create empty configuration file for VictoriaMetrics file: 
path=/etc/victoriametrics-promscrape.yml state=touch owner=pmm group=pmm - - name: Reread supervisord configuration again + - name: Reread supervisord configuration again EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl reread register: reread_result changed_when: "'No config updates to processes' not in reread_result.stdout" + - name: Reread supervisord configuration again EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + command: /usr/local/bin/supervisorctl reread + register: reread_result + changed_when: "'No config updates to processes' not in reread_result.stdout" + - name: Check reread results debug: var=reread_result.stdout_lines - - name: Restart services + - name: Restart services EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl {{ item.1 }} {{ item.0 }} - changed_when: True + become: true + changed_when: true + with_nested: + - - alertmanager + - nginx + - grafana + - qan-api2 + - pmm-agent + - ["stop", "remove", "add"] + + - name: Restart services EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' and is_docker + command: /usr/local/bin/supervisorctl {{ item.1 }} {{ item.0 }} + become: true + changed_when: true with_nested: - - alertmanager + - nginx - grafana - qan-api2 - pmm-agent @@ -301,30 +361,76 @@ option: autostart value: "true" - - name: Reread pmm-update-perform-init supervisor config + - name: Reread pmm-update-perform-init supervisor config EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl reread register: reread_init__result changed_when: "'No config updates to processes' not in reread_init__result.stdout" - - name: Update/restart other services + - name: Reread 
pmm-update-perform-init supervisor config EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + command: /usr/local/bin/supervisorctl reread + register: reread_init__result + changed_when: "'No config updates to processes' not in reread_init__result.stdout" + + - name: Update/restart other services EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' command: supervisorctl update register: update_result changed_when: "'updated' in update_result.stdout" - - name: Check other services + - name: Update/restart other services EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + command: /usr/local/bin/supervisorctl update + register: update_result + changed_when: "'updated' in update_result.stdout" + + - name: Print other services's logs debug: var=update_result.stdout_lines + - name: Check that the SQLite grafana database exists + stat: + path: /srv/grafana/grafana.db + register: is_database_sqlite + + - name: Migrate Grafana database from SQLite to Postgresql + include_role: + name: sqlite-to-postgres + when: is_database_sqlite.stat.exists + tags: + - skip_ansible_lint # '503 Tasks that run when changed should likely be handlers'. + + - name: Fix grafana fields type + postgresql_query: + db: grafana + query: "{{ item }}" + loop: + - ALTER TABLE tag ALTER COLUMN key TYPE text; + - ALTER TABLE tag ALTER COLUMN value TYPE text; + - ALTER TABLE api_key ALTER COLUMN key TYPE text; + - ALTER TABLE api_key ALTER COLUMN name TYPE text; + when: not ansible_check_mode + # SIGUSR2 is sent to supervisord by pmm-managed right before the update for logging to work correctly. # We use that fact to show what was restarted during the update. 
- - name: Get supervisord log + - name: Get supervisord logs EL7 + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' shell: supervisorctl maintail -100000 | tac | awk '!flag; /received SIGUSR2/{flag = 1};' | tac register: maintail_result changed_when: False - - name: Check supervisord log + - name: Get supervisord logs EL9 + when: (ansible_distribution == 'OracleLinux' or ansible_distribution == 'AlmaLinux') and ansible_distribution_major_version == '9' + shell: /usr/local/bin/supervisorctl maintail -100000 | tac | awk '!flag; /received SIGUSR2/{flag = 1};' | tac + register: maintail_result + changed_when: False + + - name: Print supervisord logs debug: var=maintail_result.stdout_lines - - name: Delete content & directory + - name: Cleanup yum cache file: state: absent path: /var/cache/yum + + diff --git a/update/main.go b/update/main.go index 7a6279325f..01c71fa3ad 100644 --- a/update/main.go +++ b/update/main.go @@ -160,7 +160,7 @@ func main() { go func() { s := <-signals signal.Stop(signals) - logrus.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) + logrus.Warnf("Got %s, shutting down...", unix.SignalName(s.(unix.Signal))) //nolint:forcetypeassert cancel() }() diff --git a/update/main_test.go b/update/main_test.go index 3eae131b1c..5731973001 100644 --- a/update/main_test.go +++ b/update/main_test.go @@ -30,7 +30,7 @@ func TestPackages(t *testing.T) { if err != nil { // This branch is required for tests with pmm-server:2.0.0 // In this case the exit code is 2. - e, ok := err.(*exec.ExitError) + e, ok := err.(*exec.ExitError) //nolint:errorlint require.True(t, ok) sb := string(b) diff --git a/utils/sqlrows/sqlrows.go b/utils/sqlrows/sqlrows.go index c13a360e52..7255f427c0 100644 --- a/utils/sqlrows/sqlrows.go +++ b/utils/sqlrows/sqlrows.go @@ -19,7 +19,11 @@ package sqlrows import "database/sql" // ReadRows reads and closes given *sql.Rows, returning columns, data rows, and first encountered error. 
-func ReadRows(rows *sql.Rows) (columns []string, dataRows [][]interface{}, err error) { +func ReadRows(rows *sql.Rows) ([]string, [][]interface{}, error) { + var columns []string + var dataRows [][]interface{} + var err error + defer func() { // overwrite err with e only if err does not already contain (a more interesting) error if e := rows.Close(); err == nil { @@ -29,7 +33,7 @@ func ReadRows(rows *sql.Rows) (columns []string, dataRows [][]interface{}, err e columns, err = rows.Columns() if err != nil { - return + return columns, dataRows, err } for rows.Next() { @@ -39,7 +43,7 @@ func ReadRows(rows *sql.Rows) (columns []string, dataRows [][]interface{}, err e dest[i] = &ei } if err = rows.Scan(dest...); err != nil { - return + return columns, dataRows, err } // Each dest element is an *interface{} (&ei above) which always contain some typed data @@ -48,7 +52,7 @@ func ReadRows(rows *sql.Rows) (columns []string, dataRows [][]interface{}, err e // (Go string can contain any byte sequence), but prevents json.Marshal (at jsonRows) from encoding // them as base64 strings. for i, d := range dest { - ei := *(d.(*interface{})) + ei := *(d.(*interface{})) //nolint:forcetypeassert dest[i] = ei if b, ok := (ei).([]byte); ok { dest[i] = string(b) @@ -58,5 +62,6 @@ func ReadRows(rows *sql.Rows) (columns []string, dataRows [][]interface{}, err e dataRows = append(dataRows, dest) } err = rows.Err() - return //nolint:nakedret + + return columns, dataRows, err }