diff --git a/.github/auto-comment.yml b/.github/auto-comment.yml index 604c2f8784..9a85bc9b7b 100644 --- a/.github/auto-comment.yml +++ b/.github/auto-comment.yml @@ -1,16 +1,16 @@ pullRequestOpened: | - :wave: Thanks for creating a PR! + :wave: Thanks for creating a PR! - Before we can merge this PR, please make sure that all the following items have been + Before we can merge this PR, please make sure that all the following items have been checked off. If any of the checklist items are not applicable, please leave them but - write a little note why. + write a little note why. - - [ ] Wrote tests - - [ ] Updated CHANGELOG_PENDING.md - - [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work. - - [ ] Updated relevant documentation (`docs/`) and code comments - - [ ] Re-reviewed `Files changed` in the Github PR explorer - - [ ] Applied Appropriate Labels + - [ ] Wrote tests + - [ ] Updated CHANGELOG_PENDING.md + - [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work. + - [ ] Updated relevant documentation (`docs/`) and code comments + - [ ] Re-reviewed `Files changed` in the Github PR explorer + - [ ] Applied Appropriate Labels - Thank you for your contribution to Tendermint! :rocket: \ No newline at end of file + Thank you for your contribution to Tendermint! 
:rocket: diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b6729552d2..3db35d523e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,25 +3,48 @@ updates: - package-ecosystem: github-actions directory: "/" schedule: - interval: daily - time: "11:00" + interval: weekly open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + - package-ecosystem: npm directory: "/docs" + schedule: + interval: weekly + open-pull-requests-limit: 10 + + ################################### + ## + ## Update All Go Dependencies + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + target-branch: "master" + open-pull-requests-limit: 10 + labels: + - T:dependencies + - S:automerge + + - package-ecosystem: gomod + directory: "/" schedule: interval: daily - time: "11:00" + target-branch: "v0.34.x" open-pull-requests-limit: 10 - reviewers: - - fadeev + labels: + - T:dependencies + - S:automerge + - package-ecosystem: gomod directory: "/" schedule: interval: daily - time: "11:00" + target-branch: "v0.35.x" open-pull-requests-limit: 10 - reviewers: - - melekes - - tessr labels: - T:dependencies + - S:automerge diff --git a/.github/linter/markdownlint.yml b/.github/linter/markdownlint.yml deleted file mode 100644 index 1637001cc2..0000000000 --- a/.github/linter/markdownlint.yml +++ /dev/null @@ -1,8 +0,0 @@ -default: true, -MD007: { "indent": 4 } -MD013: false -MD024: { siblings_only: true } -MD025: false -MD033: { no-inline-html: false } -no-hard-tabs: false -whitespace: false diff --git a/.github/linters/markdownlint.yml b/.github/linters/markdownlint.yml new file mode 100644 index 0000000000..6a4f61a499 --- /dev/null +++ b/.github/linters/markdownlint.yml @@ -0,0 +1,8 @@ +default: true, +MD007: {"indent": 4} +MD013: false +MD024: {siblings_only: true} +MD025: false +MD033: {no-inline-html: false} +no-hard-tabs: false +whitespace: false diff --git a/.github/linters/yaml-lint.yml b/.github/linters/yaml-lint.yml new 
file mode 100644 index 0000000000..e6fd77d117 --- /dev/null +++ b/.github/linters/yaml-lint.yml @@ -0,0 +1,9 @@ +--- +# Default rules for YAML linting from super-linter. +# See https://yamllint.readthedocs.io/en/stable/rules.html +extends: default +rules: + document-end: disable + document-start: disable + line-length: disable + truthy: disable diff --git a/.github/mergify.yml b/.github/mergify.yml index d492645973..d13fb851f9 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -1,13 +1,22 @@ +queue_rules: + - name: default + conditions: + - base=master + - label=S:automerge + pull_request_rules: - name: Automerge to master conditions: - base=master - label=S:automerge actions: - merge: + queue: method: squash - strict: smart+fasttrack - commit_message: title+body + name: default + commit_message_template: | + {{ title }} (#{{ number }}) + + {{ body }} - name: backport patches to v0.34.x branch conditions: - base=master @@ -24,4 +33,3 @@ pull_request_rules: backport: branches: - v0.35.x - diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000000..db157ad371 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,82 @@ +name: Build +# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps) +# This workflow runs on every push to master or release branch and every pull requests +# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified +on: + pull_request: + push: + branches: + - master + - release/** + +jobs: + build: + name: Build + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + goarch: ["arm", "amd64"] + goos: ["linux"] + timeout-minutes: 5 + steps: + - uses: actions/setup-go@v2 + with: + go-version: "1.17" + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 + with: + PATTERNS: | + **/**.go + "!test/" + go.mod + go.sum + Makefile + - name: install + run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} 
make build + if: "env.GIT_DIFF != ''" + + test_abci_cli: + runs-on: ubuntu-latest + needs: build + timeout-minutes: 5 + steps: + - uses: actions/setup-go@v2 + with: + go-version: "1.17" + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - name: install + run: make install_abci + if: "env.GIT_DIFF != ''" + - run: abci/tests/test_cli/test.sh + shell: bash + if: "env.GIT_DIFF != ''" + + test_apps: + runs-on: ubuntu-latest + needs: build + timeout-minutes: 5 + steps: + - uses: actions/setup-go@v2 + with: + go-version: "1.17" + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - name: install + run: make install install_abci + if: "env.GIT_DIFF != ''" + - name: test_apps + run: test/app/test.sh + shell: bash + if: "env.GIT_DIFF != ''" diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml deleted file mode 100644 index 4a3b89074d..0000000000 --- a/.github/workflows/coverage.yml +++ /dev/null @@ -1,132 +0,0 @@ -name: Test Coverage -on: - pull_request: - push: - paths: - - "**.go" - branches: - - master - - release/** - -jobs: - split-test-files: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.4 - - name: Create a file with all the pkgs - run: go list ./... > pkgs.txt - - name: Split pkgs into 4 files - run: split -d -n l/4 pkgs.txt pkgs.txt.part. 
- # cache multiple - - uses: actions/upload-artifact@v2 - with: - name: "${{ github.sha }}-00" - path: ./pkgs.txt.part.00 - - uses: actions/upload-artifact@v2 - with: - name: "${{ github.sha }}-01" - path: ./pkgs.txt.part.01 - - uses: actions/upload-artifact@v2 - with: - name: "${{ github.sha }}-02" - path: ./pkgs.txt.part.02 - - uses: actions/upload-artifact@v2 - with: - name: "${{ github.sha }}-03" - path: ./pkgs.txt.part.03 - - build-linux: - name: Build - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - goarch: ["arm", "amd64"] - timeout-minutes: 5 - steps: - - uses: actions/setup-go@v2 - with: - go-version: "1.17" - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 - with: - PATTERNS: | - **/**.go - "!test/" - go.mod - go.sum - - name: install - run: GOOS=linux GOARCH=${{ matrix.goarch }} make build - if: "env.GIT_DIFF != ''" - - tests: - runs-on: ubuntu-latest - needs: split-test-files - strategy: - fail-fast: false - matrix: - part: ["00", "01", "02", "03"] - steps: - - uses: actions/setup-go@v2 - with: - go-version: "1.17" - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 - with: - PATTERNS: | - **/**.go - "!test/" - go.mod - go.sum - - uses: actions/download-artifact@v2 - with: - name: "${{ github.sha }}-${{ matrix.part }}" - if: env.GIT_DIFF - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: "1.17" - - name: test & coverage report creation - run: | - cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out - if: env.GIT_DIFF - - uses: actions/upload-artifact@v2 - with: - name: "${{ github.sha }}-${{ matrix.part }}-coverage" - path: ./${{ matrix.part }}profile.out - - upload-coverage-report: - runs-on: ubuntu-latest - needs: tests - steps: - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 - with: - PATTERNS: | - **/**.go - "!test/" - go.mod - go.sum - - uses: 
actions/download-artifact@v2 - with: - name: "${{ github.sha }}-00-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v2 - with: - name: "${{ github.sha }}-01-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v2 - with: - name: "${{ github.sha }}-02-coverage" - if: env.GIT_DIFF - - uses: actions/download-artifact@v2 - with: - name: "${{ github.sha }}-03-coverage" - if: env.GIT_DIFF - - run: | - cat ./*profile.out | grep -v "mode: set" >> coverage.txt - if: env.GIT_DIFF - - uses: codecov/codecov-action@v2.1.0 - with: - file: ./coverage.txt - if: env.GIT_DIFF diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a9ead295e2..5c67583201 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,20 +1,19 @@ -name: Build & Push -# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags +name: Docker +# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags # and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags on: - pull_request: push: branches: - master tags: - - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 - - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5 + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. 
v1.0-rc1, v20.15.10-rc5 jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Prepare id: prep run: | @@ -39,18 +38,18 @@ jobs: with: platforms: all - - name: Set up Docker Buildx + - name: Set up Docker Build uses: docker/setup-buildx-action@v1.6.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v1.10.0 + uses: docker/login-action@v1.12.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.7.0 + uses: docker/build-push-action@v2.8.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index d43bff12f2..2a3f0015f6 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -6,7 +6,7 @@ name: e2e-nightly-34x on: - workflow_dispatch: # allow running workflow manually, in theory + workflow_dispatch: # allow running workflow manually, in theory schedule: - cron: '0 2 * * *' @@ -25,7 +25,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 with: ref: 'v0.34.x' @@ -59,7 +59,7 @@ jobs: SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x SLACK_FOOTER: '' - e2e-nightly-success: # may turn this off once they seem to pass consistently + e2e-nightly-success: # may turn this off once they seem to pass consistently needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml index b3acdf62be..13e3f80200 100644 --- a/.github/workflows/e2e-nightly-35x.yml +++ b/.github/workflows/e2e-nightly-35x.yml @@ -5,7 +5,7 @@ name: e2e-nightly-35x on: - workflow_dispatch: # allow running workflow manually + workflow_dispatch: # allow running workflow manually schedule: - cron: '0 2 * * *' @@ -25,7 +25,7 @@ 
jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 with: ref: 'v0.35.x' @@ -59,7 +59,7 @@ jobs: SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x SLACK_FOOTER: '' - e2e-nightly-success: # may turn this off once they seem to pass consistently + e2e-nightly-success: # may turn this off once they seem to pass consistently needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index da8b07d70e..3602cd8322 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -5,7 +5,7 @@ name: e2e-nightly-master on: - workflow_dispatch: # allow running workflow manually + workflow_dispatch: # allow running workflow manually schedule: - cron: '0 2 * * *' @@ -24,7 +24,7 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Build working-directory: test/e2e @@ -56,7 +56,7 @@ jobs: SLACK_MESSAGE: Nightly E2E tests failed on master SLACK_FOOTER: '' - e2e-nightly-success: # may turn this off once they seem to pass consistently + e2e-nightly-success: # may turn this off once they seem to pass consistently needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 134ae979c9..71aec16f77 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -2,7 +2,7 @@ name: e2e # Runs the CI end-to-end test network on all pushes to master or release branches # and every pull request, but only if any Go files have been changed. 
on: - workflow_dispatch: # allow running workflow manually + workflow_dispatch: # allow running workflow manually pull_request: push: branches: @@ -17,8 +17,8 @@ jobs: - uses: actions/setup-go@v2 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | **/**.go @@ -35,4 +35,3 @@ jobs: working-directory: test/e2e run: ./run-multiple.sh networks/ci.toml if: "env.GIT_DIFF != ''" - diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 38ca6896d6..e12ee2321e 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -1,7 +1,7 @@ # Runs fuzzing nightly. -name: fuzz-nightly +name: Fuzz Tests on: - workflow_dispatch: # allow running workflow manually + workflow_dispatch: # allow running workflow manually schedule: - cron: '0 3 * * *' pull_request: @@ -17,30 +17,15 @@ jobs: with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - name: Install go-fuzz working-directory: test/fuzz run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build - - name: Fuzz mempool-v1 + - name: Fuzz mempool working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1 - continue-on-error: true - - - name: Fuzz mempool-v0 - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0 - continue-on-error: true - - - name: Fuzz p2p-addrbook - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook - continue-on-error: true - - - name: Fuzz p2p-pex - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool continue-on-error: true - name: Fuzz p2p-sc diff --git a/.github/workflows/jepsen.yml 
b/.github/workflows/jepsen.yml index 0e358af6e4..60b49443d3 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 with: repository: 'tendermint/jepsen' diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index c97b22cd19..af446771a4 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -1,12 +1,12 @@ name: Check Markdown links -on: +on: schedule: - cron: '* */24 * * *' jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v2.4.0 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3e257e47c5..12bf3b553a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -13,8 +13,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | **/**.go diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index fa7c9bd993..628b1af69e 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,17 +19,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.3.4 - with: - # Full git history is needed to get a proper list of changed files within `super-linter` - fetch-depth: 0 + uses: actions/checkout@v2.4.0 - name: Lint Code Base uses: docker://github/super-linter:v4 env: - LINTER_RULES_PATH: . 
VALIDATE_ALL_CODEBASE: true DEFAULT_BRANCH: master GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VALIDATE_MD: true VALIDATE_OPENAPI: true VALIDATE_YAML: true + YAML_CONFIG_FILE: yaml-lint.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 567a607cab..3d65b289bb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,14 +5,14 @@ on: branches: - "RC[0-9]/**" tags: - - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 jobs: goreleaser: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v2.4.0 with: fetch-depth: 0 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6ee4db915f..f3f5cba1d1 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,106 +1,75 @@ -name: Tests -# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps) -# This workflow runs on every push to master or release branch and every pull requests -# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified +name: Test on: pull_request: push: + paths: + - "**.go" branches: - master - release/** jobs: - build: - name: Build + tests: runs-on: ubuntu-latest - timeout-minutes: 5 + strategy: + fail-fast: false + matrix: + part: ["00", "01", "02", "03", "04", "05"] steps: - uses: actions/setup-go@v2 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | **/**.go + "!test/" go.mod go.sum - - name: install - run: make install install_abci - if: "env.GIT_DIFF != ''" - - uses: actions/cache@v2.1.7 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- + Makefile + - name: Run Go Tests + run: | + 
make test-group-${{ matrix.part }} NUM_SPLIT=6 if: env.GIT_DIFF - # Cache binaries for use by other jobs - - uses: actions/cache@v2.1.7 + - uses: actions/upload-artifact@v2 with: - path: ~/go/bin - key: ${{ runner.os }}-${{ github.sha }}-tm-binary - if: env.GIT_DIFF + name: "${{ github.sha }}-${{ matrix.part }}-coverage" + path: ./build/${{ matrix.part }}.profile.out - test_abci_cli: + upload-coverage-report: runs-on: ubuntu-latest - needs: build - timeout-minutes: 5 + needs: tests steps: - - uses: actions/setup-go@v2 - with: - go-version: "1.17" - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v2.4.0 + - uses: technote-space/get-diff-action@v6.0.1 with: PATTERNS: | **/**.go + "!test/" go.mod go.sum - - uses: actions/cache@v2.1.7 + Makefile + - uses: actions/download-artifact@v2 with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- + name: "${{ github.sha }}-00-coverage" if: env.GIT_DIFF - - uses: actions/cache@v2.1.7 + - uses: actions/download-artifact@v2 with: - path: ~/go/bin - key: ${{ runner.os }}-${{ github.sha }}-tm-binary + name: "${{ github.sha }}-01-coverage" if: env.GIT_DIFF - - run: abci/tests/test_cli/test.sh - shell: bash - if: env.GIT_DIFF - - test_apps: - runs-on: ubuntu-latest - needs: build - timeout-minutes: 5 - steps: - - uses: actions/setup-go@v2 + - uses: actions/download-artifact@v2 with: - go-version: "1.17" - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 - with: - PATTERNS: | - **/**.go - go.mod - go.sum - - uses: actions/cache@v2.1.7 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- + name: "${{ github.sha }}-02-coverage" if: env.GIT_DIFF - - uses: actions/cache@v2.1.7 + - uses: actions/download-artifact@v2 with: - path: ~/go/bin - key: ${{ runner.os }}-${{ github.sha }}-tm-binary + name: "${{ github.sha 
}}-03-coverage" if: env.GIT_DIFF - - name: test_apps - run: test/app/test.sh - shell: bash + - run: | + cat ./*profile.out | grep -v "mode: set" >> coverage.txt + if: env.GIT_DIFF + - uses: codecov/codecov-action@v2.1.0 + with: + file: ./coverage.txt if: env.GIT_DIFF diff --git a/.gitignore b/.gitignore index 7f412d4612..b753f03754 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ docs/_build docs/dist docs/node_modules/ docs/spec +docs/.vuepress/public/rpc index.html.md libs/pubsub/query/fuzz_test/output profile\.out @@ -46,3 +47,10 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip +proto/tendermint/blocksync/types.proto +proto/tendermint/consensus/types.proto +proto/tendermint/mempool/*.proto +proto/tendermint/p2p/*.proto +proto/tendermint/statesync/*.proto +proto/tendermint/types/*.proto +proto/tendermint/version/*.proto diff --git a/.golangci.yml b/.golangci.yml index e0f3fe163e..7330f25ee6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -24,7 +24,7 @@ linters: - govet - ineffassign # - interfacer - - lll + # - lll # - maligned - misspell - nakedret @@ -46,9 +46,6 @@ issues: - path: _test\.go linters: - gosec - - linters: - - lll - source: "https://" max-same-issues: 50 linters-settings: diff --git a/.goreleaser.yml b/.goreleaser.yml index 267d4e4ac0..28c6a017d6 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -29,8 +29,8 @@ release: archives: - files: - - LICENSE - - README.md - - UPGRADING.md - - SECURITY.md - - CHANGELOG.md + - LICENSE + - README.md + - UPGRADING.md + - SECURITY.md + - CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md index e1ef2201a2..e0a6126705 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,67 +2,95 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). 
-## v0.35.0-rc2 +## v0.35.1 -September 27, 2021 +January 26, 2022 + +Special thanks to external contributors on this release: @altergui, @odeke-em, +@thanethomson ### BREAKING CHANGES -- Go API +- CLI/RPC/Config - - [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on - deprecated fundamentals. Downstream users should maintain this - library. (@tychoish) - - [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to - `internal` to prevent consumption of these internal APIs by - external users. (@tychoish) + - [config] [\#7276](https://github.com/tendermint/tendermint/pull/7276) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson). + +- P2P Protocol + + - [p2p] [\#7265](https://github.com/tendermint/tendermint/pull/7265) Peer manager reduces peer score for each failed dial attempts for peers that have not successfully dialed. (@tychoish) + - [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui) ### FEATURES -- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters). +- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze) (@cmwaters) +### IMPROVEMENTS -## v0.35.0-rc1 +- [internal/protoio] [\#7325](https://github.com/tendermint/tendermint/pull/7325) Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em) +- [\#7338](https://github.com/tendermint/tendermint/pull/7338) pubsub: Performance improvements for the event query API (backport of #7319) (@creachadair) +- [\#7252](https://github.com/tendermint/tendermint/pull/7252) Add basic metrics to the indexer package. 
(@creachadair) +- [\#7338](https://github.com/tendermint/tendermint/pull/7338) Performance improvements for the event query API. (@creachadair) -September 8, 2021 +### BUG FIXES -Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis, -@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua +- [\#7310](https://github.com/tendermint/tendermint/issues/7310) pubsub: Report a non-nil error when shutting down (fixes #7306). +- [\#7355](https://github.com/tendermint/tendermint/pull/7355) Fix incorrect tests using the PSQL sink. (@creachadair) +- [\#7683](https://github.com/tendermint/tendermint/pull/7683) rpc: check error code for broadcast_tx_commit. (@tychoish) -### BREAKING CHANGES +## v0.35.0 -- CLI/RPC/Config - - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) - - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. 
(@erikgrinaker) - - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) - - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) - - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) - - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) - - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) - - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) - - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) - - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) - - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) - - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) - - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) - - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. 
- - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0 - - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. - - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) - - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialPeers` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. - - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish) +November 4, 2021 -- Apps - - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) - - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface - - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. - - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. - - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. 
- - It is not required any longer to set ldflags to set version strings - - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app +Special thanks to external contributors on this release: @JayT106, +@bipulprasad, @alessio, @Yawning, @silasdavis, @cuonglm, @tanyabouman, +@JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua + +### FEATURES + +- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of an incorrect app hash. (@cmwaters) +- [config] [\#7174](https://github.com/tendermint/tendermint/pull/7174) expose ability to write config to arbitrary paths. (@tychoish) +- [mempool, rpc] [\#7065](https://github.com/tendermint/tendermint/pull/7065) add removetx rpc method (backport of #7047) (@tychoish). +- [\#6982](https://github.com/tendermint/tendermint/pull/6982) tendermint binary has built-in suppport for running the e2e application (with state sync support) (@cmwaters). +- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam +- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) +- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatability. Introduces two new pex messages to + accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer + exchange reactors behave the same. (@cmwaters) +- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type +- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. 
(@alexanderbez) + - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of + the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index. + - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the + `mempool.version` configuration, where `v1` is the default configuration. + - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node. + - Transactions are gossiped in FIFO order as they are in `v0`. +- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) +- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106) +- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106) +- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) + +### BUG FIXES + +- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish). +- [\#7142](https://github.com/tendermint/tendermint/pull/7142) mempool: remove panic when recheck-tx was not sent to ABCI application (@williambanfield). 
+- [consensus]: [\#7060](https://github.com/tendermint/tendermint/pull/7060) + wait until peerUpdates channel is closed to close remaining peers (@williambanfield) +- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) +- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106) +- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) +- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106) + +### BREAKING CHANGES - Go API + + - [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove package which is unused, and based on + deprecated fundamentals. Downstream users should maintain this + library. (@tychoish) + - [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to + `internal` to prevent consumption of these internal APIs by + external users. (@tychoish) - [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) - [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) - [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. 
(@tychoish) @@ -98,34 +126,45 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad, - [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` - [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters) -- Data Storage - - [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters) - - [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish) - - [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106) +- CLI/RPC/Config -- Tooling + - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) + - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. 
(@erikgrinaker) + - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) + - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) + - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) + - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) + - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) + - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) + - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) + - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) + - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) + - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) + - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) + - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. 
+ - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated, please use v0 + - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. + - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) + - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. + - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish) - [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Set OS home dir to instead of the hardcoded PATH. (@JayT106) - [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Reindex events command line tooling. (@JayT106) -### FEATURES +- Apps -- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam -- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) -- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatability. Introduces two new pex messages to - accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer - exchange reactors behave the same. (@cmwaters) -- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type -- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. 
(@alexanderbez) - - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of - the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index. - - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the - `mempool.version` configuration, where `v1` is the default configuration. - - Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node. - - Transactions are gossiped in FIFO order as they are in `v0`. -- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) -- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106) -- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106) -- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) + - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez) + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. + - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. 
+ - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. + - It is not required any longer to set ldflags to set version strings + - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app + +- Data Storage + - [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters) + - [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish) + - [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. 
(@JayT106) ### IMPROVEMENTS @@ -170,13 +209,20 @@ Special thanks to external contributors on this release: @JayT106, @bipulprasad, - [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) - [statesync] \6807 Implement P2P state provider as an alternative to RPC (@cmwaters) +## v0.34.15 + +Special thanks to external contributors on this release: @thanethomson + ### BUG FIXES -- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) -- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) -- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106) -- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) -- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106) +- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters). +- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306). +- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair). +- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish). + +### IMPROVEMENTS + +- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson). 
## v0.34.14 @@ -1994,7 +2040,7 @@ For more, see issues marked This release also includes a fix to prevent Tendermint from including the same piece of evidence in more than one block. This issue was reported by @chengwenxi in our -[bug bounty program](https://hackerone.com/tendermint). +[bug bounty program](https://hackerone.com/cosmos). ### BREAKING CHANGES: @@ -2487,7 +2533,7 @@ Special thanks to external contributors on this release: @james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu. Special thanks to @Slamper for a series of bug reports in our [bug bounty -program](https://hackerone.com/tendermint) which are fixed in this release. +program](https://hackerone.com/cosmos) which are fixed in this release. This release is primarily about adding Version fields to various data structures, optimizing consensus messages for signing and verification in diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9d8148783c..8dccb8dd3b 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -12,31 +12,63 @@ Special thanks to external contributors on this release: - CLI/RPC/Config - - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair). + - [rpc] \#7121 Remove the deprecated gRPC interface to the RPC service. (@creachadair) + - [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish) + - [mempool] \#7171 Remove legacy mempool implementation. (@tychoish) + - [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair) + - [rpc] \#7713 Remove unused options for websocket clients. (@creachadair) - Apps + - [proto/tendermint] \#6976 Remove core protobuf files in favor of only housing them in the [tendermint/spec](https://github.com/tendermint/spec) repository. 
+ - P2P Protocol - - [p2p] \#7035 Remove legacy P2P routing implementation and - associated configuration options (@tychoish) + - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish) + - [p2p] \#7265 Peer manager reduces peer score for each failed dial attempt for peers that have not successfully dialed. (@tychoish) + - [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui) - Go API - - [blocksync] \#7046 Remove v2 implementation of the blocksync - service and recactor, which was disabled in the previous release - (@tychoish) + - [rpc] \#7474 Remove the "URI" RPC client. (@creachadair) + - [libs/pubsub] \#7451 Internalize the pubsub packages. (@creachadair) + - [libs/sync] \#7450 Internalize and remove the library. (@creachadair) + - [libs/async] \#7449 Move library to internal. (@creachadair) + - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair) + - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair) + - [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish) + - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish) + - [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish) + - [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish) + - [abci/client] \#7607 Simplify client interface (removes most "async" methods). (@creachadair) + - [libs/json] \#7673 Remove the libs/json (tmjson) library. (@creachadair) - Blockchain Protocol ### FEATURES +- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze) +- [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. 
(@jonasbostoen) - [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-determinstic app hash or reverting an upgrade. - [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish) +- [consensus] \#7354 add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield) +- [consensus] \#7376 Update the proposal logic per the Proposer-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield) +- [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield) +- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timeliness per the proposer-based timestamp specification. (@anca) +- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca) +- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca) ### IMPROVEMENTS +- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em) +- [consensus] \#6969 remove logic to 'unlock' a locked block. 
+- [pubsub] \#7319 Performance improvements for the event query API (@creachadair) +- [node] \#7521 Define concrete type for seed node implementation (@spacech1mp) +- [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp) +- [light] [\#7536](https://github.com/tendermint/tendermint/pull/7536) rpc /status call returns info about the light client (@jmalicevic) + ### BUG FIXES - fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek) +- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov) +- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16bef07ccf..e4613f84e2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,7 +109,7 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. -We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. +We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. This command uses the spec repo to get the necessary protobuf files for generating the go code. If you are modifying the proto files manually for changes in the core data structures, you will need to clone them into the go repo and comment out lines 22-37 of the file `./scripts/protocgen.sh`. 
### Visual Studio Code diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 0465bec09c..2785d7e240 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,5 +1,5 @@ # stage 1 Generate Tendermint Binary -FROM golang:1.16-alpine as builder +FROM golang:1.17-alpine as builder RUN apk update && \ apk upgrade && \ apk --no-cache add make @@ -8,7 +8,7 @@ WORKDIR /tendermint RUN make build-linux # stage 2 -FROM golang:1.15-alpine +FROM golang:1.17-alpine LABEL maintainer="hello@tendermint.com" # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json diff --git a/Makefile b/Makefile index 2bac7f5bfa..2a644ebe0a 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ #!/usr/bin/make -f -PACKAGES=$(shell go list ./...) BUILDDIR ?= $(CURDIR)/build BUILD_TAGS?=tendermint @@ -14,8 +13,8 @@ endif LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" -HTTPS_GIT := https://github.com/tendermint/tendermint.git -DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf +BUILD_IMAGE := ghcr.io/tendermint/docker-build-proto +DOCKER_PROTO_BUILDER := docker run -v $(shell pwd):/workspace --workdir /workspace $(BUILD_IMAGE) CGO_ENABLED ?= 0 # handle nostrip @@ -79,32 +78,17 @@ $(BUILDDIR)/: ### Protobuf ### ############################################################################### -proto-all: proto-gen proto-lint proto-check-breaking -.PHONY: proto-all - proto-gen: @docker pull -q tendermintdev/docker-build-proto @echo "Generating Protobuf files" - @docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh + @$(DOCKER_PROTO_BUILDER) sh ./scripts/protocgen.sh .PHONY: proto-gen -proto-lint: - @$(DOCKER_BUF) lint --error-format=json -.PHONY: proto-lint - proto-format: @echo "Formatting Protobuf files" - docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto 
find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; + @$(DOCKER_PROTO_BUILDER) find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; .PHONY: proto-format -proto-check-breaking: - @$(DOCKER_BUF) breaking --against .git#branch=master -.PHONY: proto-check-breaking - -proto-check-breaking-ci: - @$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master -.PHONY: proto-check-breaking-ci - ############################################################################### ### Build ABCI ### ############################################################################### @@ -118,7 +102,7 @@ install_abci: .PHONY: install_abci ############################################################################### -### Privval Server ### +### Privval Server ### ############################################################################### build_privval_server: @@ -303,3 +287,25 @@ build-reproducible: --name latest-build cosmossdk/rbuilder:latest docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/ .PHONY: build-reproducible + +# Implements test splitting and running. This is pulled directly from +# the github action workflows for better local reproducibility. + +GO_TEST_FILES != find $(CURDIR) -name "*_test.go" + +# default to four splits by default +NUM_SPLIT ?= 4 + +$(BUILDDIR): + mkdir -p $@ + +# The format statement filters out all packages that don't have tests. +# Note we need to check for both in-package tests (.TestGoFiles) and +# out-of-package tests (.XTestGoFiles). +$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR) + go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@ + +split-test-packages:$(BUILDDIR)/packages.txt + split -d -n l/$(NUM_SPLIT) $< $<. 
+test-group-%:split-test-packages + cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out diff --git a/README.md b/README.md index fe10a35a0e..e148766ec2 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ a more detailed overview what to expect from this repository. | Requirement | Notes | |-------------|------------------| -| Go version | Go1.16 or higher | +| Go version | Go1.17 or higher | ## Contributing diff --git a/RELEASES.md b/RELEASES.md index 8d9bc2b8ea..a7f862e335 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -29,8 +29,8 @@ merging the pull request. ### Creating a backport branch -If this is the first release candidate for a major release, you get to have the honor of creating -the backport branch! +If this is the first release candidate for a major release, you get to have the +honor of creating the backport branch! Note that, after creating the backport branch, you'll also need to update the tags on `master` so that `go mod` is able to order the branches correctly. You @@ -43,16 +43,33 @@ the 0.35.x line. 1. Start on `master` 2. Create and push the backport branch: - `git checkout -b v0.35.x; git push origin v0.35.x` -3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up: - `git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."; git push origin v0.36.0-dev` -4. Create a new workflow (still on master) to run e2e nightlies for the new backport branch. - (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml - for an example.) -5. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the + ```sh + git checkout -b v0.35.x + git push origin v0.35.x + ``` + +After doing these steps, go back to `master` and do the following: + +1. Tag `master` as the dev branch for the _next_ major release and push it back up. 
+ For example: + ```sh + git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36." + git push origin v0.36.0-dev + ``` + +2. Create a new workflow to run e2e nightlies for the new backport branch. + (See [e2e-nightly-master.yml][e2e] for an example.) + +3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x` [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered. +4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to + enable automatic update of Go dependencies on this branch. Copy and edit one + of the existing branch configurations to set the correct `target-branch`. + +[e2e]: https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml + ## Release candidates Before creating an official release, especially a major release, we may want to create a @@ -129,6 +146,8 @@ If there were no release candidates, begin by creating a backport branch, as des - Add a new entry to `themeConfig.versions` in [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the release in the dropdown versions menu. + - Commit these changes to `master` and backport them into the backport + branch for this release. ## Minor release (point releases) diff --git a/SECURITY.md b/SECURITY.md index 57d13e565a..133e993c41 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -4,7 +4,7 @@ As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security), we operate a [bug -bounty](https://hackerone.com/tendermint). +bounty](https://hackerone.com/cosmos). See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in. 
### Guidelines @@ -86,7 +86,7 @@ If you are running older versions of Tendermint Core, we encourage you to upgrad ## Scope -The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope: +The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/cosmos). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope: * Any third-party services * Findings from physical testing, such as office access diff --git a/UPGRADING.md b/UPGRADING.md index 99efdf2259..bd6411c6a0 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -98,7 +98,7 @@ are: - `blockchain` - `evidence` -Accordingly, the `node` package was changed to reduce access to +Accordingly, the `node` package changed to reduce access to tendermint internals: applications that use tendermint as a library will need to change to accommodate these changes. Most notably: @@ -109,6 +109,20 @@ will need to change to accommodate these changes. Most notably: longer exported and have been replaced with `node.New` and `node.NewDefault` which provide more functional interfaces. +To access any of the functionality previously available via the +`node.Node` type, use the `*local.Local` "RPC" client, that exposes +the full RPC interface provided as direct function calls. Import the +`github.com/tendermint/tendermint/rpc/client/local` package and pass +the node service as in the following: + +```go + node := node.NewDefault() //construct the node object + // start and set up the node service + + client := local.New(node.(local.NodeService)) + // use client object to interact with the node +``` + ### gRPC Support Mark gRPC in the RPC layer as deprecated and to be removed in 0.36. 
diff --git a/abci/README.md b/abci/README.md index 4a953dab38..e2234f4d1a 100644 --- a/abci/README.md +++ b/abci/README.md @@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g A detailed description of the ABCI methods and message types is contained in: - [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md) -- [A protobuf file](../proto/tendermint/abci/types.proto) +- [A protobuf file](https://github.com/tendermint/spec/blob/master/proto/tendermint/abci/types.proto) - [A Go interface](./types/application.go) ## Protocol Buffers diff --git a/abci/client/client.go b/abci/client/client.go index c0caea33e6..6a26e3e3dc 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -33,49 +33,39 @@ type Client interface { // Asynchronous requests FlushAsync(context.Context) (*ReqRes, error) - EchoAsync(ctx context.Context, msg string) (*ReqRes, error) - InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error) DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error) CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error) - QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error) - CommitAsync(context.Context) (*ReqRes, error) - InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error) - BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error) - EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error) - ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error) - OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error) - LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error) - 
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error) - PreprocessTxsAsync(context.Context, types.RequestPreprocessTxs) (*ReqRes, error) // Synchronous requests - FlushSync(context.Context) error - EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) - CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) - CommitSync(context.Context) (*types.ResponseCommit, error) - InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) - BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) - ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) - PreprocessTxsSync(context.Context, types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) + Flush(context.Context) error + Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) + Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error) + DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) + CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) + Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error) + Commit(context.Context) (*types.ResponseCommit, error) + 
InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) + PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) + ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error) + VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) + BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) + EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) + ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) } //---------------------------------------- // NewClient returns a new ABCI client of the specified transport type. // It returns an error if the transport is not "socket" or "grpc" -func NewClient(addr, transport string, mustConnect bool) (client Client, err error) { +func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (client Client, err error) { switch transport { case "socket": - client = NewSocketClient(addr, mustConnect) + client = NewSocketClient(logger, addr, mustConnect) case "grpc": - client = NewGRPCClient(addr, mustConnect) + client = NewGRPCClient(logger, addr, mustConnect) default: err = fmt.Errorf("unknown abci transport %s", transport) } @@ -89,7 +79,7 @@ type ReqRes struct { *sync.WaitGroup *types.Response // Not set atomically, so be sure to use WaitGroup. - mtx tmsync.Mutex + mtx sync.Mutex done bool // Gets set to true once *after* WaitGroup.Done(). cb func(*types.Response) // A single callback that may be set. 
} diff --git a/abci/client/creators.go b/abci/client/creators.go index e17b15eca4..a1b65f5fe7 100644 --- a/abci/client/creators.go +++ b/abci/client/creators.go @@ -2,30 +2,31 @@ package abciclient import ( "fmt" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" ) // Creator creates new ABCI clients. -type Creator func() (Client, error) +type Creator func(log.Logger) (Client, error) // NewLocalCreator returns a Creator for the given app, // which will be running locally. func NewLocalCreator(app types.Application) Creator { - mtx := new(tmsync.Mutex) + mtx := new(sync.Mutex) - return func() (Client, error) { - return NewLocalClient(mtx, app), nil + return func(logger log.Logger) (Client, error) { + return NewLocalClient(logger, mtx, app), nil } } // NewRemoteCreator returns a Creator for the given address (e.g. // "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you // want the client to connect before reporting success. 
-func NewRemoteCreator(addr, transport string, mustConnect bool) Creator { - return func() (Client, error) { - remoteApp, err := NewClient(addr, transport, mustConnect) +func NewRemoteCreator(logger log.Logger, addr, transport string, mustConnect bool) Creator { + return func(log.Logger) (Client, error) { + remoteApp, err := NewClient(logger, addr, transport, mustConnect) if err != nil { return nil, fmt.Errorf("failed to connect to proxy: %w", err) } diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 98e837c57e..2eb9f7363f 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -2,15 +2,17 @@ package abciclient import ( "context" + "errors" "fmt" "net" "sync" "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -18,13 +20,15 @@ import ( // A gRPC client. type grpcClient struct { service.BaseService + logger log.Logger + mustConnect bool client types.ABCIApplicationClient conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool - mtx tmsync.Mutex + mtx sync.Mutex addr string err error resCb func(*types.Request, *types.Response) // listens to all callbacks @@ -42,8 +46,9 @@ var _ Client = (*grpcClient)(nil) // which is expensive, but easy - if you want something better, use the socket // protocol! 
maybe one day, if people really want it, we use grpc streams, but // hopefully not :D -func NewGRPCClient(addr string, mustConnect bool) Client { +func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &grpcClient{ + logger: logger, addr: addr, mustConnect: mustConnect, // Buffering the channel is needed to make calls appear asynchronous, @@ -54,7 +59,7 @@ func NewGRPCClient(addr string, mustConnect bool) Client { // gRPC calls while processing a slow callback at the channel head. chReqRes: make(chan *ReqRes, 64), } - cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli) + cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli) return cli } @@ -62,7 +67,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func (cli *grpcClient) OnStart() error { +func (cli *grpcClient) OnStart(ctx context.Context) error { // This processes asynchronous request/response messages and dispatches // them to callbacks. go func() { @@ -84,38 +89,52 @@ func (cli *grpcClient) OnStart() error { cb(reqres.Response) } } - for reqres := range cli.chReqRes { - if reqres != nil { - callCb(reqres) - } else { - cli.Logger.Error("Received nil reqres") + + for { + select { + case reqres := <-cli.chReqRes: + if reqres != nil { + callCb(reqres) + } else { + cli.logger.Error("Received nil reqres") + } + case <-ctx.Done(): + return } + } }() RETRY_LOOP: for { - conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(cli.addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialerFunc), + ) if err != nil { if cli.mustConnect { return err } - cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err) + cli.logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. 
Retrying...\n", cli.addr), "err", err) time.Sleep(time.Second * dialRetryIntervalSeconds) continue RETRY_LOOP } - cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr) + cli.logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr) client := types.NewABCIApplicationClient(conn) cli.conn = conn ENSURE_CONNECTED: for { - _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true)) + _, err := client.Echo(ctx, &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true)) if err == nil { break ENSURE_CONNECTED } - cli.Logger.Error("Echo failed", "err", err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err + } + + cli.logger.Error("Echo failed", "err", err) time.Sleep(time.Second * echoRetryIntervalSeconds) } @@ -142,9 +161,9 @@ func (cli *grpcClient) StopForError(err error) { } cli.mtx.Unlock() - cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error())) + cli.logger.Error("Stopping abci.grpcClient for error", "err", err) if err := cli.Stop(); err != nil { - cli.Logger.Error("Error stopping abci.grpcClient", "err", err) + cli.logger.Error("error stopping abci.grpcClient", "err", err) } } @@ -164,16 +183,6 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) { //---------------------------------------- -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { - req := types.ToRequestEcho(msg) - res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}}) -} - // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) { req := types.ToRequestFlush() @@ -184,16 +193,6 @@ func (cli *grpcClient) FlushAsync(ctx 
context.Context) (*ReqRes, error) { return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}}) } -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) { - req := types.ToRequestInfo(params) - res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}}) -} - // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { req := types.ToRequestDeliverTx(params) @@ -214,115 +213,6 @@ func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestChe return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}) } -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) { - req := types.ToRequestQuery(params) - res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) { - req := types.ToRequestCommit() - res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) { - req := 
types.ToRequestInitChain(params) - res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) { - req := types.ToRequestBeginBlock(params) - res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) { - req := types.ToRequestEndBlock(params) - res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) { - req := types.ToRequestListSnapshots(params) - res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) { - req := types.ToRequestOfferSnapshot(params) - res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true)) - if err != nil { - 
return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) LoadSnapshotChunkAsync( - ctx context.Context, - params types.RequestLoadSnapshotChunk, -) (*ReqRes, error) { - req := types.ToRequestLoadSnapshotChunk(params) - res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) -} - -// NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) ApplySnapshotChunkAsync( - ctx context.Context, - params types.RequestApplySnapshotChunk, -) (*ReqRes, error) { - req := types.ToRequestApplySnapshotChunk(params) - res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - return cli.finishAsyncCall( - ctx, - req, - &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}, - ) -} - -func (cli *grpcClient) PreprocessTxsAsync(ctx context.Context, params types.RequestPreprocessTxs) (*ReqRes, error) { - req := types.ToRequestPreprocessTxs(params) - res, err := cli.client.PreprocessTxs(context.Background(), req.GetPreprocessTxs(), grpc.WaitForReady(true)) - if err != nil { - cli.StopForError(err) - } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_PreprocessTxs{PreprocessTxs: res}}) -} - // finishAsyncCall creates a ReqRes for an async call, and immediately populates it // with the response. We don't complete it until it's been ordered via the channel. 
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) { @@ -366,30 +256,22 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response { //---------------------------------------- -func (cli *grpcClient) FlushSync(ctx context.Context) error { - return nil -} +func (cli *grpcClient) Flush(ctx context.Context) error { return nil } -func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { - reqres, err := cli.EchoAsync(ctx, msg) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetEcho(), cli.Error() +func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + req := types.ToRequestEcho(msg) + return cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true)) } -func (cli *grpcClient) InfoSync( +func (cli *grpcClient) Info( ctx context.Context, - req types.RequestInfo, + params types.RequestInfo, ) (*types.ResponseInfo, error) { - reqres, err := cli.InfoAsync(ctx, req) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetInfo(), cli.Error() + req := types.ToRequestInfo(params) + return cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true)) } -func (cli *grpcClient) DeliverTxSync( +func (cli *grpcClient) DeliverTx( ctx context.Context, params types.RequestDeliverTx, ) (*types.ResponseDeliverTx, error) { @@ -401,7 +283,7 @@ func (cli *grpcClient) DeliverTxSync( return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error() } -func (cli *grpcClient) CheckTxSync( +func (cli *grpcClient) CheckTx( ctx context.Context, params types.RequestCheckTx, ) (*types.ResponseCheckTx, error) { @@ -413,115 +295,100 @@ func (cli *grpcClient) CheckTxSync( return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error() } -func (cli *grpcClient) QuerySync( +func (cli *grpcClient) Query( ctx context.Context, - req types.RequestQuery, + params types.RequestQuery, ) (*types.ResponseQuery, 
error) { - reqres, err := cli.QueryAsync(ctx, req) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetQuery(), cli.Error() + req := types.ToRequestQuery(params) + return cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true)) } -func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { - reqres, err := cli.CommitAsync(ctx) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetCommit(), cli.Error() +func (cli *grpcClient) Commit(ctx context.Context) (*types.ResponseCommit, error) { + req := types.ToRequestCommit() + return cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true)) } -func (cli *grpcClient) InitChainSync( +func (cli *grpcClient) InitChain( ctx context.Context, params types.RequestInitChain, ) (*types.ResponseInitChain, error) { - reqres, err := cli.InitChainAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetInitChain(), cli.Error() + req := types.ToRequestInitChain(params) + return cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true)) } -func (cli *grpcClient) BeginBlockSync( +func (cli *grpcClient) BeginBlock( ctx context.Context, params types.RequestBeginBlock, ) (*types.ResponseBeginBlock, error) { - reqres, err := cli.BeginBlockAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error() + req := types.ToRequestBeginBlock(params) + return cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true)) } -func (cli *grpcClient) EndBlockSync( +func (cli *grpcClient) EndBlock( ctx context.Context, params types.RequestEndBlock, ) (*types.ResponseEndBlock, error) { - reqres, err := cli.EndBlockAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error() + req := types.ToRequestEndBlock(params) + return cli.client.EndBlock(ctx, req.GetEndBlock(), 
grpc.WaitForReady(true)) } -func (cli *grpcClient) ListSnapshotsSync( +func (cli *grpcClient) ListSnapshots( ctx context.Context, params types.RequestListSnapshots, ) (*types.ResponseListSnapshots, error) { - reqres, err := cli.ListSnapshotsAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error() + req := types.ToRequestListSnapshots(params) + return cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true)) } -func (cli *grpcClient) OfferSnapshotSync( +func (cli *grpcClient) OfferSnapshot( ctx context.Context, params types.RequestOfferSnapshot, ) (*types.ResponseOfferSnapshot, error) { - reqres, err := cli.OfferSnapshotAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error() + req := types.ToRequestOfferSnapshot(params) + return cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true)) } -func (cli *grpcClient) LoadSnapshotChunkSync( +func (cli *grpcClient) LoadSnapshotChunk( ctx context.Context, params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - reqres, err := cli.LoadSnapshotChunkAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error() + req := types.ToRequestLoadSnapshotChunk(params) + return cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) } -func (cli *grpcClient) ApplySnapshotChunkSync( +func (cli *grpcClient) ApplySnapshotChunk( ctx context.Context, params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - reqres, err := cli.ApplySnapshotChunkAsync(ctx, params) - if err != nil { - return nil, err - } - return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error() + req := types.ToRequestApplySnapshotChunk(params) + return cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), 
grpc.WaitForReady(true)) } -func (cli *grpcClient) PreprocessTxsSync( +func (cli *grpcClient) PrepareProposal( ctx context.Context, - params types.RequestPreprocessTxs, -) (*types.ResponsePreprocessTxs, error) { + params types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - reqres, err := cli.PreprocessTxsAsync(ctx, params) - if err != nil { - return nil, err - } - return reqres.Response.GetPreprocessTxs(), cli.Error() + req := types.ToRequestPrepareProposal(params) + return cli.client.PrepareProposal(ctx, req.GetPrepareProposal(), grpc.WaitForReady(true)) +} + +func (cli *grpcClient) ExtendVote( + ctx context.Context, + params types.RequestExtendVote) (*types.ResponseExtendVote, error) { + + req := types.ToRequestExtendVote(params) + return cli.client.ExtendVote(ctx, req.GetExtendVote(), grpc.WaitForReady(true)) +} + +func (cli *grpcClient) VerifyVoteExtension( + ctx context.Context, + params types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + + req := types.ToRequestVerifyVoteExtension(params) + return cli.client.VerifyVoteExtension(ctx, req.GetVerifyVoteExtension(), grpc.WaitForReady(true)) } diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 622746fbb2..23934138da 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -2,9 +2,10 @@ package abciclient import ( "context" + "sync" types "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -15,7 +16,7 @@ import ( type localClient struct { service.BaseService - mtx *tmsync.Mutex + mtx *sync.Mutex types.Application Callback } @@ -26,22 +27,25 @@ var _ Client = (*localClient)(nil) // methods of the given app. // // Both Async and Sync methods ignore the given context.Context parameter. 
-func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { +func NewLocalClient(logger log.Logger, mtx *sync.Mutex, app types.Application) Client { if mtx == nil { - mtx = new(tmsync.Mutex) + mtx = new(sync.Mutex) } cli := &localClient{ mtx: mtx, Application: app, } - cli.BaseService = *service.NewBaseService(nil, "localClient", cli) + cli.BaseService = *service.NewBaseService(logger, "localClient", cli) return cli } +func (*localClient) OnStart(context.Context) error { return nil } +func (*localClient) OnStop() {} + func (app *localClient) SetResponseCallback(cb Callback) { app.mtx.Lock() + defer app.mtx.Unlock() app.Callback = cb - app.mtx.Unlock() } // TODO: change types.Application to include Error()? @@ -54,27 +58,6 @@ func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) { return newLocalReqRes(types.ToRequestFlush(), nil), nil } -func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - return app.callback( - types.ToRequestEcho(msg), - types.ToResponseEcho(msg), - ), nil -} - -func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.Info(req) - return app.callback( - types.ToRequestInfo(req), - types.ToResponseInfo(res), - ), nil -} - func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -97,133 +80,17 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck ), nil } -func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.Query(req) - return app.callback( - types.ToRequestQuery(req), - types.ToResponseQuery(res), - ), nil -} - -func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) { - 
app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.Commit() - return app.callback( - types.ToRequestCommit(), - types.ToResponseCommit(res), - ), nil -} - -func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.InitChain(req) - return app.callback( - types.ToRequestInitChain(req), - types.ToResponseInitChain(res), - ), nil -} - -func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.BeginBlock(req) - return app.callback( - types.ToRequestBeginBlock(req), - types.ToResponseBeginBlock(res), - ), nil -} - -func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.EndBlock(req) - return app.callback( - types.ToRequestEndBlock(req), - types.ToResponseEndBlock(res), - ), nil -} - -func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.ListSnapshots(req) - return app.callback( - types.ToRequestListSnapshots(req), - types.ToResponseListSnapshots(res), - ), nil -} - -func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.OfferSnapshot(req) - return app.callback( - types.ToRequestOfferSnapshot(req), - types.ToResponseOfferSnapshot(res), - ), nil -} - -func (app *localClient) LoadSnapshotChunkAsync( - ctx context.Context, - req types.RequestLoadSnapshotChunk, -) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.LoadSnapshotChunk(req) - return app.callback( - types.ToRequestLoadSnapshotChunk(req), - 
types.ToResponseLoadSnapshotChunk(res), - ), nil -} - -func (app *localClient) ApplySnapshotChunkAsync( - ctx context.Context, - req types.RequestApplySnapshotChunk, -) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.ApplySnapshotChunk(req) - return app.callback( - types.ToRequestApplySnapshotChunk(req), - types.ToResponseApplySnapshotChunk(res), - ), nil -} - -func (app *localClient) PreprocessTxsAsync(ctx context.Context, req types.RequestPreprocessTxs) (*ReqRes, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.PreprocessTxs(req) - return app.callback( - types.ToRequestPreprocessTxs(req), - types.ToResponsePreprocessTx(res), - ), nil -} - //------------------------------------------------------- -func (app *localClient) FlushSync(ctx context.Context) error { +func (app *localClient) Flush(ctx context.Context) error { return nil } -func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { +func (app *localClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { return &types.ResponseEcho{Message: msg}, nil } -func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { +func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -231,7 +98,7 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t return &res, nil } -func (app *localClient) DeliverTxSync( +func (app *localClient) DeliverTx( ctx context.Context, req types.RequestDeliverTx, ) (*types.ResponseDeliverTx, error) { @@ -243,7 +110,7 @@ func (app *localClient) DeliverTxSync( return &res, nil } -func (app *localClient) CheckTxSync( +func (app *localClient) CheckTx( ctx context.Context, req types.RequestCheckTx, ) (*types.ResponseCheckTx, error) { @@ -254,7 +121,7 @@ func (app *localClient) CheckTxSync( return &res, nil } 
-func (app *localClient) QuerySync( +func (app *localClient) Query( ctx context.Context, req types.RequestQuery, ) (*types.ResponseQuery, error) { @@ -265,7 +132,7 @@ func (app *localClient) QuerySync( return &res, nil } -func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { +func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -273,7 +140,7 @@ func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, return &res, nil } -func (app *localClient) InitChainSync( +func (app *localClient) InitChain( ctx context.Context, req types.RequestInitChain, ) (*types.ResponseInitChain, error) { @@ -285,7 +152,7 @@ func (app *localClient) InitChainSync( return &res, nil } -func (app *localClient) BeginBlockSync( +func (app *localClient) BeginBlock( ctx context.Context, req types.RequestBeginBlock, ) (*types.ResponseBeginBlock, error) { @@ -297,7 +164,7 @@ func (app *localClient) BeginBlockSync( return &res, nil } -func (app *localClient) EndBlockSync( +func (app *localClient) EndBlock( ctx context.Context, req types.RequestEndBlock, ) (*types.ResponseEndBlock, error) { @@ -309,7 +176,7 @@ func (app *localClient) EndBlockSync( return &res, nil } -func (app *localClient) ListSnapshotsSync( +func (app *localClient) ListSnapshots( ctx context.Context, req types.RequestListSnapshots, ) (*types.ResponseListSnapshots, error) { @@ -321,7 +188,7 @@ func (app *localClient) ListSnapshotsSync( return &res, nil } -func (app *localClient) OfferSnapshotSync( +func (app *localClient) OfferSnapshot( ctx context.Context, req types.RequestOfferSnapshot, ) (*types.ResponseOfferSnapshot, error) { @@ -333,7 +200,7 @@ func (app *localClient) OfferSnapshotSync( return &res, nil } -func (app *localClient) LoadSnapshotChunkSync( +func (app *localClient) LoadSnapshotChunk( ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { @@ 
-344,7 +211,7 @@ func (app *localClient) LoadSnapshotChunkSync( return &res, nil } -func (app *localClient) ApplySnapshotChunkSync( +func (app *localClient) ApplySnapshotChunk( ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { @@ -355,14 +222,36 @@ func (app *localClient) ApplySnapshotChunkSync( return &res, nil } -func (app *localClient) PreprocessTxsSync( +func (app *localClient) PrepareProposal( + ctx context.Context, + req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.PrepareProposal(req) + return &res, nil +} + +func (app *localClient) ExtendVote( + ctx context.Context, + req types.RequestExtendVote) (*types.ResponseExtendVote, error) { + + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ExtendVote(req) + return &res, nil +} + +func (app *localClient) VerifyVoteExtension( ctx context.Context, - req types.RequestPreprocessTxs, -) (*types.ResponsePreprocessTxs, error) { + req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + app.mtx.Lock() defer app.mtx.Unlock() - res := app.Application.PreprocessTxs(req) + res := app.Application.VerifyVoteExtension(req) return &res, nil } diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 03c257d9df..108103ef72 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -7,8 +7,6 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" - log "github.com/tendermint/tendermint/libs/log" - mock "github.com/stretchr/testify/mock" types "github.com/tendermint/tendermint/abci/types" @@ -19,31 +17,8 @@ type Client struct { mock.Mock } -// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 
*abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { +// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 +func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseApplySnapshotChunk @@ -65,31 +40,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA return r0, r1 } -// BeginBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BeginBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { +// BeginBlock provides a mock function with given fields: _a0, _a1 +func (_m *Client) BeginBlock(_a0 context.Context, _a1 
types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseBeginBlock @@ -111,31 +63,8 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc return r0, r1 } -// CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CheckTxSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseCheckTx @@ -157,13 +86,13 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t return r0, r1 } -// CommitAsync provides a mock function with given fields: _a0 -func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0) +// CheckTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { + r0 = rf(_a0, _a1) } 
else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abciclient.ReqRes) @@ -171,8 +100,8 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) { } var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -180,8 +109,8 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) { return r0, r1 } -// CommitSync provides a mock function with given fields: _a0 -func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) { +// Commit provides a mock function with given fields: _a0 +func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) { ret := _m.Called(_a0) var r0 *types.ResponseCommit @@ -203,31 +132,8 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) return r0, r1 } -// DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeliverTxSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { +// DeliverTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { ret := _m.Called(_a0, _a1) var r0 
*types.ResponseDeliverTx @@ -249,13 +155,13 @@ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) return r0, r1 } -// EchoAsync provides a mock function with given fields: ctx, msg -func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { - ret := _m.Called(ctx, msg) +// DeliverTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok { - r0 = rf(ctx, msg) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abciclient.ReqRes) @@ -263,8 +169,8 @@ func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, msg) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -272,8 +178,8 @@ func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes return r0, r1 } -// EchoSync provides a mock function with given fields: ctx, msg -func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { +// Echo provides a mock function with given fields: ctx, msg +func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { ret := _m.Called(ctx, msg) var r0 *types.ResponseEcho @@ -295,31 +201,8 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho return r0, r1 } -// EndBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) { - ret := 
_m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EndBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { +// EndBlock provides a mock function with given fields: _a0, _a1 +func (_m *Client) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseEndBlock @@ -355,22 +238,22 @@ func (_m *Client) Error() error { return r0 } -// FlushAsync provides a mock function with given fields: _a0 -func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0) +// ExtendVote provides a mock function with given fields: _a0, _a1 +func (_m *Client) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) { + ret := _m.Called(_a0, _a1) - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { - r0 = rf(_a0) + var r0 *types.ResponseExtendVote + if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) + r0 = ret.Get(0).(*types.ResponseExtendVote) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -378,8 +261,8 @@ func (_m *Client) FlushAsync(_a0 context.Context) 
(*abciclient.ReqRes, error) { return r0, r1 } -// FlushSync provides a mock function with given fields: _a0 -func (_m *Client) FlushSync(_a0 context.Context) error { +// Flush provides a mock function with given fields: _a0 +func (_m *Client) Flush(_a0 context.Context) error { ret := _m.Called(_a0) var r0 error @@ -392,13 +275,13 @@ func (_m *Client) FlushSync(_a0 context.Context) error { return r0 } -// InfoAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) +// FlushAsync provides a mock function with given fields: _a0 +func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { + ret := _m.Called(_a0) var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abciclient.ReqRes) @@ -406,8 +289,8 @@ func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicl } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -415,8 +298,8 @@ func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicl return r0, r1 } -// InfoSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { +// Info provides a mock function with given fields: _a0, _a1 +func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseInfo @@ -438,31 +321,8 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R 
return r0, r1 } -// InitChainAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// InitChainSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { +// InitChain provides a mock function with given fields: _a0, _a1 +func (_m *Client) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseInitChain @@ -498,31 +358,8 @@ func (_m *Client) IsRunning() bool { return r0 } -// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListSnapshotsSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +// 
ListSnapshots provides a mock function with given fields: _a0, _a1 +func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseListSnapshots @@ -544,31 +381,8 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn return r0, r1 } -// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { +// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 +func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseLoadSnapshotChunk @@ -590,31 +404,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo return r0, r1 } -// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) 
*abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// OfferSnapshotSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +// OfferSnapshot provides a mock function with given fields: _a0, _a1 +func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseOfferSnapshot @@ -636,54 +427,21 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS return r0, r1 } -// OnReset provides a mock function with given fields: -func (_m *Client) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Client) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Client) OnStop() { - _m.Called() -} - -// PreprocessTxsAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) PreprocessTxsAsync(_a0 context.Context, _a1 types.RequestPreprocessTxs) (*abciclient.ReqRes, error) { +// PrepareProposal provides a mock function with given fields: _a0, _a1 +func (_m *Client) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, 
types.RequestPreprocessTxs) *abciclient.ReqRes); ok { + var r0 *types.ResponsePrepareProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) + r0 = ret.Get(0).(*types.ResponsePrepareProposal) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestPreprocessTxs) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -692,54 +450,8 @@ func (_m *Client) PreprocessTxsAsync(_a0 context.Context, _a1 types.RequestPrepr return r0, r1 } -// PreprocessTxsSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) PreprocessTxsSync(_a0 context.Context, _a1 types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { - ret := _m.Called(_a0, _a1) - - var r0 *types.ResponsePreprocessTxs - if rf, ok := ret.Get(0).(func(context.Context, types.RequestPreprocessTxs) *types.ResponsePreprocessTxs); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponsePreprocessTxs) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestPreprocessTxs) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 
-} - -// QuerySync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { +// Query provides a mock function with given fields: _a0, _a1 +func (_m *Client) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseQuery @@ -761,53 +473,18 @@ func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types return r0, r1 } -// Quit provides a mock function with given fields: -func (_m *Client) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Client) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - // SetResponseCallback provides a mock function with given fields: _a0 func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() +// Start provides a mock function with given fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -815,32 +492,41 @@ func (_m *Client) Start() error { return r0 } -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { +// String provides a mock function with given fields: +func (_m 
*Client) String() string { ret := _m.Called() - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(string) } return r0 } -// String provides a mock function with given fields: -func (_m *Client) String() string { - ret := _m.Called() +// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 +func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + ret := _m.Called(_a0, _a1) - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() + var r0 *types.ResponseVerifyVoteExtension + if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok { + r0 = rf(_a0, _a1) } else { - r0 = ret.Get(0).(string) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension) + } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // Wait provides a mock function with given fields: diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 6eedb38fa9..808c695866 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -9,10 +9,11 @@ import ( "io" "net" "reflect" + "sync" "time" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -32,6 +33,7 @@ type reqResWithContext struct { // general is not meant to be interfaced with concurrent callers. 
type socketClient struct { service.BaseService + logger log.Logger addr string mustConnect bool @@ -39,7 +41,7 @@ type socketClient struct { reqQueue chan *reqResWithContext - mtx tmsync.Mutex + mtx sync.Mutex err error reqSent *list.List // list of requests sent, waiting for response resCb func(*types.Request, *types.Response) // called on all requests, if set. @@ -50,22 +52,22 @@ var _ Client = (*socketClient)(nil) // NewSocketClient creates a new socket client, which connects to a given // address. If mustConnect is true, the client will return an error upon start // if it fails to connect. -func NewSocketClient(addr string, mustConnect bool) Client { +func NewSocketClient(logger log.Logger, addr string, mustConnect bool) Client { cli := &socketClient{ + logger: logger, reqQueue: make(chan *reqResWithContext, reqQueueSize), mustConnect: mustConnect, - - addr: addr, - reqSent: list.New(), - resCb: nil, + addr: addr, + reqSent: list.New(), + resCb: nil, } - cli.BaseService = *service.NewBaseService(nil, "socketClient", cli) + cli.BaseService = *service.NewBaseService(logger, "socketClient", cli) return cli } // OnStart implements Service by connecting to the server and spawning reading // and writing goroutines. -func (cli *socketClient) OnStart() error { +func (cli *socketClient) OnStart(ctx context.Context) error { var ( err error conn net.Conn @@ -77,15 +79,15 @@ func (cli *socketClient) OnStart() error { if cli.mustConnect { return err } - cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...", + cli.logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. 
Retrying after %vs...", cli.addr, dialRetryIntervalSeconds), "err", err) time.Sleep(time.Second * dialRetryIntervalSeconds) continue } cli.conn = conn - go cli.sendRequestsRoutine(conn) - go cli.recvResponseRoutine(conn) + go cli.sendRequestsRoutine(ctx, conn) + go cli.recvResponseRoutine(ctx, conn) return nil } @@ -113,19 +115,25 @@ func (cli *socketClient) Error() error { // NOTE: callback may get internally generated flush responses. func (cli *socketClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() + defer cli.mtx.Unlock() cli.resCb = resCb - cli.mtx.Unlock() } //---------------------------------------- -func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { +func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer) { bw := bufio.NewWriter(conn) for { select { + case <-ctx.Done(): + return case reqres := <-cli.reqQueue: + if ctx.Err() != nil { + return + } + if reqres.C.Err() != nil { - cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) + cli.logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) continue } cli.willSendReq(reqres.R) @@ -138,16 +146,16 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { cli.stopForError(fmt.Errorf("flush buffer: %w", err)) return } - - case <-cli.Quit(): - return } } } -func (cli *socketClient) recvResponseRoutine(conn io.Reader) { +func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader) { r := bufio.NewReader(conn) for { + if ctx.Err() != nil { + return + } var res = &types.Response{} err := types.ReadMessage(r, res) if err != nil { @@ -155,7 +163,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) { return } - // cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res) + // cli.logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res) switch r := res.Value.(type) { case *types.Response_Exception: // app 
responded with error @@ -214,18 +222,10 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { //---------------------------------------- -func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg)) -} - func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) { return cli.queueRequestAsync(ctx, types.ToRequestFlush()) } -func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestInfo(req)) -} - func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) { return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req)) } @@ -234,55 +234,9 @@ func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestChec return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req)) } -func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestQuery(req)) -} - -func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestCommit()) -} - -func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req)) -} - -func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req)) -} - -func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req)) -} - -func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req)) 
-} - -func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req)) -} - -func (cli *socketClient) LoadSnapshotChunkAsync( - ctx context.Context, - req types.RequestLoadSnapshotChunk, -) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req)) -} - -func (cli *socketClient) ApplySnapshotChunkAsync( - ctx context.Context, - req types.RequestApplySnapshotChunk, -) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req)) -} - -func (cli *socketClient) PreprocessTxsAsync(ctx context.Context, req types.RequestPreprocessTxs) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestPreprocessTxs(req)) -} - //---------------------------------------- -func (cli *socketClient) FlushSync(ctx context.Context) error { +func (cli *socketClient) Flush(ctx context.Context) error { reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true) if err != nil { return queueErr(err) @@ -307,158 +261,180 @@ func (cli *socketClient) FlushSync(ctx context.Context) error { } } -func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg)) +func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEcho(msg)) if err != nil { return nil, err } return reqres.Response.GetEcho(), nil } -func (cli *socketClient) InfoSync( +func (cli *socketClient) Info( ctx context.Context, req types.RequestInfo, ) (*types.ResponseInfo, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInfo(req)) if err != nil { return nil, err } return reqres.Response.GetInfo(), nil } -func (cli *socketClient) 
DeliverTxSync( +func (cli *socketClient) DeliverTx( ctx context.Context, req types.RequestDeliverTx, ) (*types.ResponseDeliverTx, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestDeliverTx(req)) if err != nil { return nil, err } return reqres.Response.GetDeliverTx(), nil } -func (cli *socketClient) CheckTxSync( +func (cli *socketClient) CheckTx( ctx context.Context, req types.RequestCheckTx, ) (*types.ResponseCheckTx, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCheckTx(req)) if err != nil { return nil, err } return reqres.Response.GetCheckTx(), nil } -func (cli *socketClient) QuerySync( +func (cli *socketClient) Query( ctx context.Context, req types.RequestQuery, ) (*types.ResponseQuery, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestQuery(req)) if err != nil { return nil, err } return reqres.Response.GetQuery(), nil } -func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit()) +func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) { + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCommit()) if err != nil { return nil, err } return reqres.Response.GetCommit(), nil } -func (cli *socketClient) InitChainSync( +func (cli *socketClient) InitChain( ctx context.Context, req types.RequestInitChain, ) (*types.ResponseInitChain, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInitChain(req)) if err != nil { return nil, err } return reqres.Response.GetInitChain(), nil } -func (cli *socketClient) BeginBlockSync( +func 
(cli *socketClient) BeginBlock( ctx context.Context, req types.RequestBeginBlock, ) (*types.ResponseBeginBlock, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestBeginBlock(req)) if err != nil { return nil, err } return reqres.Response.GetBeginBlock(), nil } -func (cli *socketClient) EndBlockSync( +func (cli *socketClient) EndBlock( ctx context.Context, req types.RequestEndBlock, ) (*types.ResponseEndBlock, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEndBlock(req)) if err != nil { return nil, err } return reqres.Response.GetEndBlock(), nil } -func (cli *socketClient) ListSnapshotsSync( +func (cli *socketClient) ListSnapshots( ctx context.Context, req types.RequestListSnapshots, ) (*types.ResponseListSnapshots, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestListSnapshots(req)) if err != nil { return nil, err } return reqres.Response.GetListSnapshots(), nil } -func (cli *socketClient) OfferSnapshotSync( +func (cli *socketClient) OfferSnapshot( ctx context.Context, req types.RequestOfferSnapshot, ) (*types.ResponseOfferSnapshot, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestOfferSnapshot(req)) if err != nil { return nil, err } return reqres.Response.GetOfferSnapshot(), nil } -func (cli *socketClient) LoadSnapshotChunkSync( +func (cli *socketClient) LoadSnapshotChunk( ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestLoadSnapshotChunk(req)) if 
err != nil { return nil, err } return reqres.Response.GetLoadSnapshotChunk(), nil } -func (cli *socketClient) ApplySnapshotChunkSync( +func (cli *socketClient) ApplySnapshotChunk( ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req)) + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestApplySnapshotChunk(req)) if err != nil { return nil, err } return reqres.Response.GetApplySnapshotChunk(), nil } -func (cli *socketClient) PreprocessTxsSync( +func (cli *socketClient) PrepareProposal( ctx context.Context, - req types.RequestPreprocessTxs, -) (*types.ResponsePreprocessTxs, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestPreprocessTxs(req)) + req types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestPrepareProposal(req)) + if err != nil { + return nil, err + } + return reqres.Response.GetPrepareProposal(), nil +} + +func (cli *socketClient) ExtendVote( + ctx context.Context, + req types.RequestExtendVote) (*types.ResponseExtendVote, error) { + + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestExtendVote(req)) + if err != nil { + return nil, err + } + return reqres.Response.GetExtendVote(), nil +} + +func (cli *socketClient) VerifyVoteExtension( + ctx context.Context, + req types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + + reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestVerifyVoteExtension(req)) if err != nil { return nil, err } - return reqres.Response.GetPreprocessTxs(), nil + return reqres.Response.GetVerifyVoteExtension(), nil } //---------------------------------------- @@ -504,7 +480,7 @@ func (cli *socketClient) queueRequestAsync( return reqres, cli.Error() } -func (cli *socketClient) queueRequestAndFlushSync( +func (cli *socketClient) 
queueRequestAndFlush( ctx context.Context, req *types.Request, ) (*ReqRes, error) { @@ -514,7 +490,7 @@ func (cli *socketClient) queueRequestAndFlushSync( return nil, queueErr(err) } - if err := cli.FlushSync(ctx); err != nil { + if err := cli.Flush(ctx); err != nil { return nil, err } @@ -572,6 +548,12 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_Query) case *types.Request_InitChain: _, ok = res.Value.(*types.Response_InitChain) + case *types.Request_PrepareProposal: + _, ok = res.Value.(*types.Response_PrepareProposal) + case *types.Request_ExtendVote: + _, ok = res.Value.(*types.Response_ExtendVote) + case *types.Request_VerifyVoteExtension: + _, ok = res.Value.(*types.Response_VerifyVoteExtension) case *types.Request_BeginBlock: _, ok = res.Value.(*types.Response_BeginBlock) case *types.Request_EndBlock: @@ -584,8 +566,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_ListSnapshots) case *types.Request_OfferSnapshot: _, ok = res.Value.(*types.Response_OfferSnapshot) - case *types.Request_PreprocessTxs: - _, ok = res.Value.(*types.Response_PreprocessTxs) } return ok } @@ -599,8 +579,8 @@ func (cli *socketClient) stopForError(err error) { cli.err = err cli.mtx.Unlock() - cli.Logger.Info("Stopping abci.socketClient", "reason", err) + cli.logger.Info("Stopping abci.socketClient", "reason", err) if err := cli.Stop(); err != nil { - cli.Logger.Error("Error stopping abci.socketClient", "err", err) + cli.logger.Error("error stopping abci.socketClient", "err", err) } } diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 53ba7b6720..556f98566f 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -14,36 +14,29 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" + 
"github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) -var ctx = context.Background() - func TestProperSyncCalls(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := slowApp{} + logger := log.NewNopLogger() - s, c := setupClientServer(t, app) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) + _, c := setupClientServer(ctx, t, logger, app) resp := make(chan error, 1) go func() { - // This is BeginBlockSync unrolled.... - reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) + rsp, err := c.BeginBlock(ctx, types.RequestBeginBlock{}) assert.NoError(t, err) - err = c.FlushSync(context.Background()) - assert.NoError(t, err) - res := reqres.Response.GetBeginBlock() - assert.NotNil(t, res) - resp <- c.Error() + assert.NoError(t, c.Flush(ctx)) + assert.NotNil(t, rsp) + select { + case <-ctx.Done(): + case resp <- c.Error(): + } }() select { @@ -55,64 +48,29 @@ func TestProperSyncCalls(t *testing.T) { } } -func TestHangingSyncCalls(t *testing.T) { - app := slowApp{} +func setupClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, +) (service.Service, abciclient.Client) { + t.Helper() - s, c := setupClientServer(t, app) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Log(err) - } - }) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Log(err) - } - }) - - resp := make(chan error, 1) - go func() { - // Start BeginBlock and flush it - reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) - assert.NoError(t, err) - flush, err := c.FlushAsync(ctx) - assert.NoError(t, err) - // wait 20 ms for all events to travel socket, but - // no response yet from server - time.Sleep(20 * time.Millisecond) - // kill the server, so the connections break - err = s.Stop() - assert.NoError(t, err) - - // wait for 
the response from BeginBlock - reqres.Wait() - flush.Wait() - resp <- c.Error() - }() - - select { - case <-time.After(time.Second): - require.Fail(t, "No response arrived") - case err, ok := <-resp: - require.True(t, ok, "Must not close channel") - assert.Error(t, err, "We should get EOF error") - } -} - -func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abciclient.Client) { // some port between 20k and 30k port := 20000 + rand.Int31()%10000 addr := fmt.Sprintf("localhost:%d", port) - s, err := server.NewServer(addr, "socket", app) - require.NoError(t, err) - err = s.Start() + s, err := server.NewServer(logger, addr, "socket", app) require.NoError(t, err) + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) - c := abciclient.NewSocketClient(addr, true) - err = c.Start() - require.NoError(t, err) + c := abciclient.NewSocketClient(logger, addr, true) + require.NoError(t, c.Start(ctx)) + t.Cleanup(c.Wait) + + require.True(t, s.IsRunning()) + require.True(t, c.IsRunning()) return s, c } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 9fae6fc05a..cffbadfb78 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -2,18 +2,19 @@ package main import ( "bufio" - "context" "encoding/hex" "errors" "fmt" "io" "os" + "os/signal" "strings" + "syscall" "github.com/spf13/cobra" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/version" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" @@ -21,16 +22,12 @@ import ( "github.com/tendermint/tendermint/abci/server" servertest "github.com/tendermint/tendermint/abci/tests/server" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/abci/version" "github.com/tendermint/tendermint/proto/tendermint/crypto" ) // client is a global variable so it can be reused by the console var ( 
client abciclient.Client - logger log.Logger - - ctx = context.Background() ) // flags @@ -50,34 +47,32 @@ var ( flagPersist string ) -var RootCmd = &cobra.Command{ - Use: "abci-cli", - Short: "the ABCI CLI tool wraps an ABCI client", - Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { +func RootCmmand(logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "abci-cli", + Short: "the ABCI CLI tool wraps an ABCI client", + Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", + PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - switch cmd.Use { - case "kvstore", "version": - return nil - } + switch cmd.Use { + case "kvstore", "version": + return nil + } - if logger == nil { - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - } + if client == nil { + var err error + client, err = abciclient.NewClient(logger.With("module", "abci-client"), flagAddress, flagAbci, false) + if err != nil { + return err + } - if client == nil { - var err error - client, err = abciclient.NewClient(flagAddress, flagAbci, false) - if err != nil { - return err - } - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - return err + if err := client.Start(cmd.Context()); err != nil { + return err + } } - } - return nil - }, + return nil + }, + } } // Structure for data passed to print response. 
@@ -99,56 +94,46 @@ type queryResponse struct { } func Execute() error { - addGlobalFlags() - addCommands() - return RootCmd.Execute() + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + return err + } + + cmd := RootCmmand(logger) + addGlobalFlags(cmd) + addCommands(cmd, logger) + return cmd.Execute() } -func addGlobalFlags() { - RootCmd.PersistentFlags().StringVarP(&flagAddress, +func addGlobalFlags(cmd *cobra.Command) { + cmd.PersistentFlags().StringVarP(&flagAddress, "address", "", "tcp://0.0.0.0:26658", "address of application socket") - RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc") - RootCmd.PersistentFlags().BoolVarP(&flagVerbose, + cmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc") + cmd.PersistentFlags().BoolVarP(&flagVerbose, "verbose", "v", false, "print the command and results as if it were a console session") - RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level") -} - -func addQueryFlags() { - queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with") - queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at") - queryCmd.PersistentFlags().BoolVarP(&flagProve, - "prove", - "", - false, - "whether or not to return a merkle proof of the query result") -} - -func addKVStoreFlags() { - kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database") + cmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level") } -func addCommands() { - RootCmd.AddCommand(batchCmd) - RootCmd.AddCommand(consoleCmd) - RootCmd.AddCommand(echoCmd) - RootCmd.AddCommand(infoCmd) - RootCmd.AddCommand(deliverTxCmd) - RootCmd.AddCommand(checkTxCmd) - RootCmd.AddCommand(commitCmd) - RootCmd.AddCommand(versionCmd) - RootCmd.AddCommand(testCmd) 
- addQueryFlags() - RootCmd.AddCommand(queryCmd) +func addCommands(cmd *cobra.Command, logger log.Logger) { + cmd.AddCommand(batchCmd) + cmd.AddCommand(consoleCmd) + cmd.AddCommand(echoCmd) + cmd.AddCommand(infoCmd) + cmd.AddCommand(deliverTxCmd) + cmd.AddCommand(checkTxCmd) + cmd.AddCommand(commitCmd) + cmd.AddCommand(versionCmd) + cmd.AddCommand(testCmd) + cmd.AddCommand(getQueryCmd()) // examples - addKVStoreFlags() - RootCmd.AddCommand(kvstoreCmd) + cmd.AddCommand(getKVStoreCmd(logger)) } var batchCmd = &cobra.Command{ @@ -233,25 +218,43 @@ var versionCmd = &cobra.Command{ Long: "print ABCI console version", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { - fmt.Println(version.Version) + fmt.Println(version.ABCIVersion) return nil }, } -var queryCmd = &cobra.Command{ - Use: "query", - Short: "query the application state", - Long: "query the application state", - Args: cobra.ExactArgs(1), - RunE: cmdQuery, +func getQueryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "query", + Short: "query the application state", + Long: "query the application state", + Args: cobra.ExactArgs(1), + RunE: cmdQuery, + } + + cmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with") + cmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at") + cmd.PersistentFlags().BoolVarP(&flagProve, + "prove", + "", + false, + "whether or not to return a merkle proof of the query result") + + return cmd } -var kvstoreCmd = &cobra.Command{ - Use: "kvstore", - Short: "ABCI demo example", - Long: "ABCI demo example", - Args: cobra.ExactArgs(0), - RunE: cmdKVStore, +func getKVStoreCmd(logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "kvstore", + Short: "ABCI demo example", + Long: "ABCI demo example", + Args: cobra.ExactArgs(0), + RunE: makeKVStoreCmd(logger), + } + + cmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database") 
+ return cmd + } var testCmd = &cobra.Command{ @@ -292,23 +295,24 @@ func compose(fs []func() error) error { } func cmdTest(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() return compose( []func() error{ - func() error { return servertest.InitChain(client) }, - func() error { return servertest.Commit(client, nil) }, - func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) }, - func() error { return servertest.Commit(client, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) }, - func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, - func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) }, - func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, + func() error { return servertest.InitChain(ctx, client) }, + func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte("abc"), code.CodeTypeBadNonce, nil) }, + func() error { return servertest.Commit(ctx, client, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeOK, nil) }, + func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00}, code.CodeTypeBadNonce, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x01}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, 
[]byte{0x00, 0x03}, code.CodeTypeOK, nil) }, + func() error { return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) }, func() error { - return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) + return servertest.DeliverTx(ctx, client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) }, - func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, + func() error { return servertest.Commit(ctx, client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) }, }) } @@ -426,12 +430,9 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error { }) fmt.Println("Available commands:") - fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short) - fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short) - fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short) - fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short) - fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short) - fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short) + for _, cmd := range cmd.Commands() { + fmt.Printf("%s: %s\n", cmd.Use, cmd.Short) + } fmt.Println("Use \"[command] --help\" for more information about a command.") return nil @@ -443,13 +444,15 @@ func cmdEcho(cmd *cobra.Command, args []string) error { if len(args) > 0 { msg = args[0] } - res, err := client.EchoSync(ctx, msg) + res, err := client.Echo(cmd.Context(), msg) if err != nil { return err } + printResponse(cmd, args, response{ Data: []byte(res.Message), }) + return nil } @@ -459,7 +462,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { if len(args) == 1 { version = args[0] } - res, err := client.InfoSync(ctx, types.RequestInfo{Version: version}) + res, err := client.Info(cmd.Context(), types.RequestInfo{Version: version}) if err != nil { return err } @@ -484,7 +487,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) + res, err := 
client.DeliverTx(cmd.Context(), types.RequestDeliverTx{Tx: txBytes}) if err != nil { return err } @@ -510,7 +513,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTx(cmd.Context(), types.RequestCheckTx{Tx: txBytes}) if err != nil { return err } @@ -525,7 +528,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, args []string) error { - res, err := client.CommitSync(ctx) + res, err := client.Commit(cmd.Context()) if err != nil { return err } @@ -550,7 +553,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.QuerySync(ctx, types.RequestQuery{ + resQuery, err := client.Query(cmd.Context(), types.RequestQuery{ Data: queryBytes, Path: flagPath, Height: int64(flagHeight), @@ -573,38 +576,34 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return nil } -func cmdKVStore(cmd *cobra.Command, args []string) error { - logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) +func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + // Create the application - in memory or persisted to disk + var app types.Application + if flagPersist == "" { + app = kvstore.NewApplication() + } else { + app = kvstore.NewPersistentKVStoreApplication(logger, flagPersist) + } - // Create the application - in memory or persisted to disk - var app types.Application - if flagPersist == "" { - app = kvstore.NewApplication() - } else { - app = kvstore.NewPersistentKVStoreApplication(flagPersist) - app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) - } + // Start the listener + srv, err := server.NewServer(logger.With("module", "abci-server"), flagAddress, flagAbci, app) + if err != nil 
{ + return err + } - // Start the listener - srv, err := server.NewServer(flagAddress, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - if err := srv.Stop(); err != nil { - logger.Error("Error while stopping server", "err", err) + if err := srv.Start(ctx); err != nil { + return err } - }) - // Run forever. - select {} + // Run forever. + <-ctx.Done() + return nil + } + } //-------------------------------------------------------------------------------- diff --git a/abci/example/example_test.go b/abci/example/example_test.go index cde8a15b16..99c7cc35c6 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" @@ -29,47 +30,52 @@ func init() { } func TestKVStore(t *testing.T) { - fmt.Println("### Testing KVStore") - testStream(t, kvstore.NewApplication()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + logger.Info("### Testing KVStore") + testStream(ctx, t, logger, kvstore.NewApplication()) } func TestBaseApp(t *testing.T) { - fmt.Println("### Testing BaseApp") - testStream(t, types.NewBaseApplication()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + logger.Info("### Testing BaseApp") + testStream(ctx, t, logger, types.NewBaseApplication()) } func TestGRPC(t *testing.T) { - fmt.Println("### Testing GRPC") - testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication())) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + logger.Info("### Testing GRPC") + testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication())) } -func testStream(t *testing.T, app types.Application) { +func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) { + t.Helper() + const numDeliverTxs = 20000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) - // Start the listener - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - err := server.Start() + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + t.Cleanup(server.Wait) + err := server.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) // Connect to the socket - client := abciclient.NewSocketClient(socket, false) - client.SetLogger(log.TestingLogger().With("module", "abci-client")) - err = client.Start() + client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false) + t.Cleanup(client.Wait) + + err = client.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) done := make(chan struct{}) counter := 0 @@ -98,8 +104,6 @@ func testStream(t *testing.T, app types.Application) { } }) - ctx := context.Background() - // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request @@ -108,7 +112,7 @@ func testStream(t *testing.T, app types.Application) { // Sometimes send flush messages if counter%128 == 0 { - err = client.FlushSync(context.Background()) + err = client.Flush(ctx) require.NoError(t, err) } } @@ -127,30 +131,25 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func 
testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { +func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) { + t.Helper() numDeliverTxs := 2000 - socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) + socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) // Start the listener - server := abciserver.NewGRPCServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - t.Fatalf("Error starting GRPC server: %v", err.Error()) - } + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, server.Start(ctx)) + t.Cleanup(func() { server.Wait() }) // Connect to the socket - conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) - if err != nil { - t.Fatalf("Error dialing GRPC server: %v", err.Error()) - } + conn, err := grpc.Dial(socket, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialerFunc), + ) + require.NoError(t, err, "Error dialing GRPC server") t.Cleanup(func() { if err := conn.Close(); err != nil { @@ -163,10 +162,9 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request - response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")}) - if err != nil { - t.Fatalf("Error in GRPC DeliverTx: %v", err.Error()) - } + response, err := client.DeliverTx(ctx, &types.RequestDeliverTx{Tx: []byte("test")}) + require.NoError(t, err, "Error in GRPC DeliverTx") + counter++ if response.Code != code.CodeTypeOK { t.Error("DeliverTx failed with ret_code", response.Code) diff --git a/abci/example/kvstore/README.md 
b/abci/example/kvstore/README.md index edc2c47a53..a768342f8b 100644 --- a/abci/example/kvstore/README.md +++ b/abci/example/kvstore/README.md @@ -4,7 +4,7 @@ There are two app's here: the KVStoreApplication and the PersistentKVStoreApplic ## KVStoreApplication -The KVStoreApplication is a simple merkle key-value store. +The KVStoreApplication is a simple merkle key-value store. Transactions of the form `key=value` are stored as key-value pairs in the tree. Transactions without an `=` sign set the value to the key. The app has no replay protection (other than what the mempool provides). @@ -12,7 +12,7 @@ The app has no replay protection (other than what the mempool provides). ## PersistentKVStoreApplication The PersistentKVStoreApplication wraps the KVStoreApplication -and provides two additional features: +and provides three additional features: 1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism) 2) validator set changes @@ -27,4 +27,4 @@ Validator set changes are effected using the following transaction format: where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). To remove a validator from the validator set, set power to `0`. -There is no sybil protection against new validators joining. +There is no sybil protection against new validators joining. 
diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 9fbaebc8a2..b6cbce1d92 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -172,7 +172,8 @@ func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.Respo return resQuery } -func (app *Application) PreprocessTxs( - req types.RequestPreprocessTxs) types.ResponsePreprocessTxs { - return types.ResponsePreprocessTxs{Txs: req.Txs} +func (app *Application) PrepareProposal( + req types.RequestPrepareProposal) types.ResponsePrepareProposal { + return types.ResponsePrepareProposal{ + BlockData: req.BlockData} } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 9d026bd876..0c104f6d73 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -3,7 +3,7 @@ package kvstore import ( "context" "fmt" - "io/ioutil" + "os" "sort" "testing" @@ -24,8 +24,6 @@ const ( testValue = "def" ) -var ctx = context.Background() - func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { req := types.RequestDeliverTx{Tx: tx} ar := app.DeliverTx(req) @@ -74,11 +72,13 @@ func TestKVStoreKV(t *testing.T) { } func TestPersistentKVStoreKV(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } - kvstore := NewPersistentKVStoreApplication(dir) + logger := log.NewTestingLogger(t) + + kvstore := NewPersistentKVStoreApplication(logger, dir) key := testKey value := key tx := []byte(key) @@ -90,11 +90,13 @@ func TestPersistentKVStoreKV(t *testing.T) { } func TestPersistentKVStoreInfo(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } - kvstore := NewPersistentKVStoreApplication(dir) + logger := log.NewTestingLogger(t) + + 
kvstore := NewPersistentKVStoreApplication(logger, dir) InitKVStore(kvstore) height := int64(0) @@ -122,11 +124,13 @@ func TestPersistentKVStoreInfo(t *testing.T) { // add a validator, remove a validator, update a validator func TestValUpdates(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO + dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO if err != nil { t.Fatal(err) } - kvstore := NewPersistentKVStoreApplication(dir) + logger := log.NewTestingLogger(t) + + kvstore := NewPersistentKVStoreApplication(logger, dir) // init with some validators total := 10 @@ -229,136 +233,136 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { } } -func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { +func makeSocketClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(logger.With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } // Connect to the socket - client := abciclient.NewSocketClient(socket, false) - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - if err = server.Stop(); err != nil { - return nil, nil, err - } + client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false) + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) 
(abciclient.Client, service.Service, error) { +func makeGRPCClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() gapp := types.NewGRPCApplication(app) - server := abciserver.NewGRPCServer(socket, gapp) - server.SetLogger(logger.With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, gapp) + + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } - client := abciclient.NewGRPCClient(socket, true) - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - if err := server.Stop(); err != nil { - return nil, nil, err - } + client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true) + + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } func TestClientServer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + // set up socket app kvstore := NewApplication() - client, server, err := makeSocketClientServer(kvstore, "kvstore-socket") + client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket") require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); server.Wait() }) + t.Cleanup(func() { cancel(); client.Wait() }) - runClientTests(t, client) + runClientTests(ctx, t, client) // set up grpc app kvstore = NewApplication() - gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc") + 
gclient, gserver, err := makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc") require.NoError(t, err) - t.Cleanup(func() { - if err := gserver.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := gclient.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); gserver.Wait() }) + t.Cleanup(func() { cancel(); gclient.Wait() }) - runClientTests(t, gclient) + runClientTests(ctx, t, gclient) } -func runClientTests(t *testing.T, client abciclient.Client) { +func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) { // run some tests.... key := testKey value := key tx := []byte(key) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) value = testValue tx = []byte(key + "=" + value) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) } -func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) { - ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) +func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) { + ar, err := app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // repeating tx doesn't raise error - ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) + ar, err = app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // commit - _, err = app.CommitSync(ctx) + _, err = app.Commit(ctx) require.NoError(t, err) - info, err := app.InfoSync(ctx, types.RequestInfo{}) + info, err := app.Info(ctx, types.RequestInfo{}) require.NoError(t, err) require.NotZero(t, info.LastBlockHeight) // make sure query is fine - resQuery, err := app.QuerySync(ctx, types.RequestQuery{ + resQuery, err := app.Query(ctx, types.RequestQuery{ Path: "/store", Data: []byte(key), }) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, 
code.CodeTypeOK, resQuery.Code) require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery, err = app.QuerySync(ctx, types.RequestQuery{ + resQuery, err = app.Query(ctx, types.RequestQuery{ Path: "/store", Data: []byte(key), Prove: true, }) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, code.CodeTypeOK, resQuery.Code) require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index d4906b4f3b..83922eb803 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -14,6 +14,7 @@ import ( "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" + ptypes "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( @@ -35,7 +36,7 @@ type PersistentKVStoreApplication struct { logger log.Logger } -func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication { +func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication { name := "kvstore" db, err := dbm.NewGoLevelDB(name, dbDir) if err != nil { @@ -47,7 +48,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication return &PersistentKVStoreApplication{ app: &Application{state: state}, valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey), - logger: log.NewNopLogger(), + logger: logger, } } @@ -55,10 +56,6 @@ func (app *PersistentKVStoreApplication) Close() error { return app.app.state.db.Close() } -func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) { - app.logger = l -} - func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo { res := 
app.app.Info(req) res.LastBlockHeight = app.app.state.Height @@ -76,6 +73,10 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t return app.execValidatorTx(req.Tx) } + if isPrepareTx(req.Tx) { + return app.execPrepareTx(req.Tx) + } + // otherwise, update the key-value store return app.app.DeliverTx(req) } @@ -113,7 +114,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t for _, v := range req.Validators { r := app.updateValidator(v) if r.IsErr() { - app.logger.Error("Error updating validators", "r", r) + app.logger.Error("error updating validators", "r", r) } } return types.ResponseInitChain{} @@ -170,9 +171,22 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk( return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT} } -func (app *PersistentKVStoreApplication) PreprocessTxs( - req types.RequestPreprocessTxs) types.ResponsePreprocessTxs { - return types.ResponsePreprocessTxs{Txs: req.Txs} +func (app *PersistentKVStoreApplication) ExtendVote( + req types.RequestExtendVote) types.ResponseExtendVote { + return types.ResponseExtendVote{ + VoteExtension: ConstructVoteExtension(req.Vote.ValidatorAddress), + } +} + +func (app *PersistentKVStoreApplication) VerifyVoteExtension( + req types.RequestVerifyVoteExtension) types.ResponseVerifyVoteExtension { + return types.RespondVerifyVoteExtension( + app.verifyExtension(req.Vote.ValidatorAddress, req.Vote.VoteExtension)) +} + +func (app *PersistentKVStoreApplication) PrepareProposal( + req types.RequestPrepareProposal) types.ResponsePrepareProposal { + return types.ResponsePrepareProposal{BlockData: app.substPrepareTx(req.BlockData)} } //--------------------------------------------- @@ -276,7 +290,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate if err := types.WriteMessage(&v, value); err != nil { return types.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, - Log: 
fmt.Sprintf("Error encoding validator: %v", err)} + Log: fmt.Sprintf("error encoding validator: %v", err)} } if err = app.app.state.db.Set(key, value.Bytes()); err != nil { panic(err) @@ -289,3 +303,53 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate return types.ResponseDeliverTx{Code: code.CodeTypeOK} } + +// ----------------------------- + +const PreparePrefix = "prepare" + +func isPrepareTx(tx []byte) bool { + return strings.HasPrefix(string(tx), PreparePrefix) +} + +// execPrepareTx is noop. tx data is considered as placeholder +// and is substitute at the PrepareProposal. +func (app *PersistentKVStoreApplication) execPrepareTx(tx []byte) types.ResponseDeliverTx { + // noop + return types.ResponseDeliverTx{} +} + +// substPrepareTx subst all the preparetx in the blockdata +// to null string(could be any arbitrary string). +func (app *PersistentKVStoreApplication) substPrepareTx(blockData [][]byte) [][]byte { + // TODO: this mechanism will change with the current spec of PrepareProposal + // We now have a special type for marking a tx as changed + for i, tx := range blockData { + if isPrepareTx(tx) { + blockData[i] = make([]byte, len(tx)) + } + } + + return blockData +} + +func ConstructVoteExtension(valAddr []byte) *ptypes.VoteExtension { + return &ptypes.VoteExtension{ + AppDataToSign: valAddr, + AppDataSelfAuthenticating: valAddr, + } +} + +func (app *PersistentKVStoreApplication) verifyExtension(valAddr []byte, ext *ptypes.VoteExtension) bool { + if ext == nil { + return false + } + canonical := ConstructVoteExtension(valAddr) + if !bytes.Equal(canonical.AppDataToSign, ext.AppDataToSign) { + return false + } + if !bytes.Equal(canonical.AppDataSelfAuthenticating, ext.AppDataSelfAuthenticating) { + return false + } + return true +} diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 503f0b64f1..8559fd6e3d 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -1,17 +1,20 @@ package 
server import ( + "context" "net" "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) type GRPCServer struct { service.BaseService + logger log.Logger proto string addr string @@ -22,20 +25,21 @@ type GRPCServer struct { } // NewGRPCServer returns a new gRPC ABCI server -func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service { +func NewGRPCServer(logger log.Logger, protoAddr string, app types.ABCIApplicationServer) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &GRPCServer{ + logger: logger, proto: proto, addr: addr, listener: nil, app: app, } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } // OnStart starts the gRPC service. -func (s *GRPCServer) OnStart() error { +func (s *GRPCServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { @@ -46,10 +50,15 @@ func (s *GRPCServer) OnStart() error { s.server = grpc.NewServer() types.RegisterABCIApplicationServer(s.server, s.app) - s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr) + s.logger.Info("Listening", "proto", s.proto, "addr", s.addr) go func() { + go func() { + <-ctx.Done() + s.server.GracefulStop() + }() + if err := s.server.Serve(s.listener); err != nil { - s.Logger.Error("Error serving gRPC server", "err", err) + s.logger.Error("error serving gRPC server", "err", err) } }() return nil diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad020..2a6d50fd23 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -12,17 +12,18 @@ import ( "fmt" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) -func NewServer(protoAddr, transport 
string, app types.Application) (service.Service, error) { +func NewServer(logger log.Logger, protoAddr, transport string, app types.Application) (service.Service, error) { var s service.Service var err error switch transport { case "socket": - s = NewSocketServer(protoAddr, app) + s = NewSocketServer(logger, protoAddr, app) case "grpc": - s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app)) + s = NewGRPCServer(logger, protoAddr, types.NewGRPCApplication(app)) default: err = fmt.Errorf("unknown server type %s", transport) } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 0e1250fe1a..3ccab7ad49 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -2,15 +2,15 @@ package server import ( "bufio" + "context" "fmt" "io" "net" - "os" "runtime" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - tmlog "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -19,61 +19,58 @@ import ( type SocketServer struct { service.BaseService - isLoggerSet bool + logger log.Logger proto string addr string listener net.Listener - connsMtx tmsync.Mutex + connsMtx sync.Mutex conns map[int]net.Conn nextConnID int - appMtx tmsync.Mutex + appMtx sync.Mutex app types.Application } -func NewSocketServer(protoAddr string, app types.Application) service.Service { +func NewSocketServer(logger log.Logger, protoAddr string, app types.Application) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &SocketServer{ + logger: logger, proto: proto, addr: addr, listener: nil, app: app, conns: make(map[int]net.Conn), } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } -func (s *SocketServer) SetLogger(l tmlog.Logger) { - 
s.BaseService.SetLogger(l) - s.isLoggerSet = true -} - -func (s *SocketServer) OnStart() error { +func (s *SocketServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } s.listener = ln - go s.acceptConnectionsRoutine() + go s.acceptConnectionsRoutine(ctx) return nil } func (s *SocketServer) OnStop() { if err := s.listener.Close(); err != nil { - s.Logger.Error("Error closing listener", "err", err) + s.logger.Error("error closing listener", "err", err) } s.connsMtx.Lock() defer s.connsMtx.Unlock() + for id, conn := range s.conns { delete(s.conns, id) if err := conn.Close(); err != nil { - s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err) + s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err) } } } @@ -103,20 +100,25 @@ func (s *SocketServer) rmConn(connID int) error { return conn.Close() } -func (s *SocketServer) acceptConnectionsRoutine() { +func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) { for { + if ctx.Err() != nil { + return + + } + // Accept a connection - s.Logger.Info("Waiting for new connection...") + s.logger.Info("Waiting for new connection...") conn, err := s.listener.Accept() if err != nil { if !s.IsRunning() { return // Ignore error from listener closing. } - s.Logger.Error("Failed to accept connection", "err", err) + s.logger.Error("Failed to accept connection", "err", err) continue } - s.Logger.Info("Accepted a new connection") + s.logger.Info("Accepted a new connection") connID := s.addConn(conn) @@ -124,35 +126,46 @@ func (s *SocketServer) acceptConnectionsRoutine() { responses := make(chan *types.Response, 1000) // A channel to buffer responses // Read requests from conn and deal with them - go s.handleRequests(closeConn, conn, responses) + go s.handleRequests(ctx, closeConn, conn, responses) // Pull responses from 'responses' and write them to conn. 
- go s.handleResponses(closeConn, conn, responses) + go s.handleResponses(ctx, closeConn, conn, responses) // Wait until signal to close connection - go s.waitForClose(closeConn, connID) + go s.waitForClose(ctx, closeConn, connID) } } -func (s *SocketServer) waitForClose(closeConn chan error, connID int) { - err := <-closeConn - switch { - case err == io.EOF: - s.Logger.Error("Connection was closed by client") - case err != nil: - s.Logger.Error("Connection error", "err", err) - default: - // never happens - s.Logger.Error("Connection was closed") - } +func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, connID int) { + defer func() { + // Close the connection + if err := s.rmConn(connID); err != nil { + s.logger.Error("error closing connection", "err", err) + } + }() - // Close the connection - if err := s.rmConn(connID); err != nil { - s.Logger.Error("Error closing connection", "err", err) + select { + case <-ctx.Done(): + return + case err := <-closeConn: + switch { + case err == io.EOF: + s.logger.Error("Connection was closed by client") + case err != nil: + s.logger.Error("Connection error", "err", err) + default: + // never happens + s.logger.Error("Connection was closed") + } } } // Read requests from conn and deal with them -func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) { +func (s *SocketServer) handleRequests( + ctx context.Context, + closeConn chan error, + conn io.Reader, + responses chan<- *types.Response, +) { var count int var bufReader = bufio.NewReader(conn) @@ -164,15 +177,15 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] err := fmt.Errorf("recovered from panic: %v\n%s", r, buf) - if !s.isLoggerSet { - fmt.Fprintln(os.Stderr, err) - } closeConn <- err s.appMtx.Unlock() } }() for { + if ctx.Err() != nil { + return + } var req = &types.Request{} err := 
types.ReadMessage(bufReader, req) @@ -227,31 +240,54 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_OfferSnapshot: res := s.app.OfferSnapshot(*r.OfferSnapshot) responses <- types.ToResponseOfferSnapshot(res) + case *types.Request_PrepareProposal: + res := s.app.PrepareProposal(*r.PrepareProposal) + responses <- types.ToResponsePrepareProposal(res) case *types.Request_LoadSnapshotChunk: res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk) responses <- types.ToResponseLoadSnapshotChunk(res) case *types.Request_ApplySnapshotChunk: res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk) responses <- types.ToResponseApplySnapshotChunk(res) - case *types.Request_PreprocessTxs: - res := s.app.PreprocessTxs(*r.PreprocessTxs) - responses <- types.ToResponsePreprocessTx(res) + case *types.Request_ExtendVote: + res := s.app.ExtendVote(*r.ExtendVote) + responses <- types.ToResponseExtendVote(res) + case *types.Request_VerifyVoteExtension: + res := s.app.VerifyVoteExtension(*r.VerifyVoteExtension) + responses <- types.ToResponseVerifyVoteExtension(res) default: responses <- types.ToResponseException("Unknown request") } } // Pull responses from 'responses' and write them to conn. 
-func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { +func (s *SocketServer) handleResponses( + ctx context.Context, + closeConn chan error, + conn io.Writer, + responses <-chan *types.Response, +) { bw := bufio.NewWriter(conn) - for res := range responses { - if err := types.WriteMessage(res, bw); err != nil { - closeConn <- fmt.Errorf("error writing message: %w", err) - return - } - if err := bw.Flush(); err != nil { - closeConn <- fmt.Errorf("error flushing write buffer: %w", err) + for { + select { + case <-ctx.Done(): return + case res := <-responses: + if err := types.WriteMessage(res, bw); err != nil { + select { + case <-ctx.Done(): + case closeConn <- fmt.Errorf("error writing message: %w", err): + } + return + } + if err := bw.Flush(); err != nil { + select { + case <-ctx.Done(): + case closeConn <- fmt.Errorf("error flushing write buffer: %w", err): + } + + return + } } } } diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 62dc6e07e4..a97c0c7c4c 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -1,27 +1,40 @@ package tests import ( + "context" "testing" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" abciclientent "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/libs/log" ) func TestClientServerNoAddrPrefix(t *testing.T) { - addr := "localhost:26658" - transport := "socket" + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const ( + addr = "localhost:26658" + transport = "socket" + ) app := kvstore.NewApplication() + logger := log.NewTestingLogger(t) - server, err := abciserver.NewServer(addr, transport, app) + server, err := abciserver.NewServer(logger, addr, transport, app) assert.NoError(t, 
err, "expected no error on NewServer") - err = server.Start() + err = server.Start(ctx) assert.NoError(t, err, "expected no error on server.Start") + t.Cleanup(server.Wait) - client, err := abciclientent.NewClient(addr, transport, true) + client, err := abciclientent.NewClient(logger, addr, transport, true) assert.NoError(t, err, "expected no error on NewClient") - err = client.Start() + err = client.Start(ctx) assert.NoError(t, err, "expected no error on client.Start") + t.Cleanup(client.Wait) } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 23adbe80de..2b2d579610 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -12,9 +12,7 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" ) -var ctx = context.Background() - -func InitChain(client abciclient.Client) error { +func InitChain(ctx context.Context, client abciclient.Client) error { total := 10 vals := make([]types.ValidatorUpdate, total) for i := 0; i < total; i++ { @@ -23,7 +21,7 @@ func InitChain(client abciclient.Client) error { power := mrand.Int() vals[i] = types.UpdateValidator(pubkey, int64(power), "") } - _, err := client.InitChainSync(ctx, types.RequestInitChain{ + _, err := client.InitChain(ctx, types.RequestInitChain{ Validators: vals, }) if err != nil { @@ -34,8 +32,8 @@ func InitChain(client abciclient.Client) error { return nil } -func Commit(client abciclient.Client, hashExp []byte) error { - res, err := client.CommitSync(ctx) +func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error { + res, err := client.Commit(ctx) data := res.Data if err != nil { fmt.Println("Failed test: Commit") @@ -51,8 +49,8 @@ func Commit(client abciclient.Client, hashExp []byte) error { return nil } -func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) +func DeliverTx(ctx context.Context, client abciclient.Client, 
txBytes []byte, codeExp uint32, dataExp []byte) error { + res, _ := client.DeliverTx(ctx, types.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: DeliverTx") @@ -70,8 +68,8 @@ func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp return nil } -func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) +func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { + res, _ := client.CheckTx(ctx, types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: CheckTx") diff --git a/abci/types/application.go b/abci/types/application.go index 4a46351a2b..7f00d4ddc7 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -2,6 +2,7 @@ package types import ( "context" + fmt "fmt" ) // Application is an interface that enables any finite, deterministic state machine @@ -18,12 +19,20 @@ type Application interface { CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool // Consensus Connection - InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore - BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block - DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing - EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set - Commit() ResponseCommit // Commit the state and return the application Merkle root hash - PreprocessTxs(RequestPreprocessTxs) ResponsePreprocessTxs // State machine preprocessing of txs + InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore + 
PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal + // Signals the beginning of a block + BeginBlock(RequestBeginBlock) ResponseBeginBlock + // Deliver a tx for full processing + DeliverTx(RequestDeliverTx) ResponseDeliverTx + // Signals the end of a block, returns changes to the validator set + EndBlock(RequestEndBlock) ResponseEndBlock + // Commit the state and return the application Merkle root hash + Commit() ResponseCommit + // Create application specific vote extension + ExtendVote(RequestExtendVote) ResponseExtendVote + // Verify application's vote extension data + VerifyVoteExtension(RequestVerifyVoteExtension) ResponseVerifyVoteExtension // State Sync Connection ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots @@ -60,6 +69,14 @@ func (BaseApplication) Commit() ResponseCommit { return ResponseCommit{} } +func (BaseApplication) ExtendVote(req RequestExtendVote) ResponseExtendVote { + return ResponseExtendVote{} +} + +func (BaseApplication) VerifyVoteExtension(req RequestVerifyVoteExtension) ResponseVerifyVoteExtension { + return ResponseVerifyVoteExtension{} +} + func (BaseApplication) Query(req RequestQuery) ResponseQuery { return ResponseQuery{Code: CodeTypeOK} } @@ -92,8 +109,8 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons return ResponseApplySnapshotChunk{} } -func (BaseApplication) PreprocessTxs(req RequestPreprocessTxs) ResponsePreprocessTxs { - return ResponsePreprocessTxs{} +func (BaseApplication) PrepareProposal(req RequestPrepareProposal) ResponsePrepareProposal { + return ResponsePrepareProposal{} } //------------------------------------------------------- @@ -179,8 +196,27 @@ func (app *GRPCApplication) ApplySnapshotChunk( return &res, nil } -func (app *GRPCApplication) PreprocessTxs( - ctx context.Context, req *RequestPreprocessTxs) (*ResponsePreprocessTxs, error) { - res := app.app.PreprocessTxs(*req) +func (app *GRPCApplication) ExtendVote( + ctx 
context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { + res := app.app.ExtendVote(*req) + return &res, nil +} + +func (app *GRPCApplication) VerifyVoteExtension( + ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { + res := app.app.VerifyVoteExtension(*req) + return &res, nil +} + +func (app *GRPCApplication) PrepareProposal( + ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + res := app.app.PrepareProposal(*req) return &res, nil } + +func (app *GRPCApplication) ProcessProposal( + ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + fmt.Println("ProcessProposal not implemented yet.") + // res := app.app.ProcessProposal(*req) + return nil, nil +} diff --git a/abci/types/messages.go b/abci/types/messages.go index 7a7aa337c8..ec2a2d28d8 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -110,9 +110,21 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request { } } -func ToRequestPreprocessTxs(res RequestPreprocessTxs) *Request { +func ToRequestExtendVote(req RequestExtendVote) *Request { return &Request{ - Value: &Request_PreprocessTxs{&res}, + Value: &Request_ExtendVote{&req}, + } +} + +func ToRequestVerifyVoteExtension(req RequestVerifyVoteExtension) *Request { + return &Request{ + Value: &Request_VerifyVoteExtension{&req}, + } +} + +func ToRequestPrepareProposal(req RequestPrepareProposal) *Request { + return &Request{ + Value: &Request_PrepareProposal{&req}, } } @@ -207,8 +219,20 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response { } } -func ToResponsePreprocessTx(res ResponsePreprocessTxs) *Response { +func ToResponseExtendVote(res ResponseExtendVote) *Response { + return &Response{ + Value: &Response_ExtendVote{&res}, + } +} + +func ToResponseVerifyVoteExtension(res ResponseVerifyVoteExtension) *Response { + return &Response{ + Value: 
&Response_VerifyVoteExtension{&res}, + } +} + +func ToResponsePrepareProposal(res ResponsePrepareProposal) *Response { return &Response{ - Value: &Response_PreprocessTxs{&res}, + Value: &Response_PrepareProposal{&res}, } } diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 491d10c7f8..fb219fe078 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -14,7 +14,7 @@ import ( func TestMarshalJSON(t *testing.T) { b, err := json.Marshal(&ResponseDeliverTx{}) - assert.Nil(t, err) + assert.NoError(t, err) // include empty fields. assert.True(t, strings.Contains(string(b), "code")) r1 := ResponseCheckTx{ @@ -31,11 +31,11 @@ func TestMarshalJSON(t *testing.T) { }, } b, err = json.Marshal(&r1) - assert.Nil(t, err) + assert.NoError(t, err) var r2 ResponseCheckTx err = json.Unmarshal(b, &r2) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, r1, r2) } @@ -49,11 +49,11 @@ func TestWriteReadMessageSimple(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + assert.NoError(t, err) msg := new(RequestEcho) err = ReadMessage(buf, msg) - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } @@ -71,11 +71,11 @@ func TestWriteReadMessage(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + assert.NoError(t, err) msg := new(tmproto.Header) err = ReadMessage(buf, msg) - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } @@ -103,11 +103,11 @@ func TestWriteReadMessage2(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + assert.NoError(t, err) msg := new(ResponseCheckTx) err = ReadMessage(buf, msg) - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } diff --git a/abci/types/result.go b/abci/types/result.go index dba6bfd159..a08c3fda57 100644 --- 
a/abci/types/result.go +++ b/abci/types/result.go @@ -5,6 +5,8 @@ import ( "encoding/json" "github.com/gogo/protobuf/jsonpb" + + types "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( @@ -41,6 +43,16 @@ func (r ResponseQuery) IsErr() bool { return r.Code != CodeTypeOK } +// IsOK returns true if Code is OK +func (r ResponseVerifyVoteExtension) IsOK() bool { + return r.Result <= ResponseVerifyVoteExtension_ACCEPT +} + +// IsErr returns true if Code is something other than OK. +func (r ResponseVerifyVoteExtension) IsErr() bool { + return r.Result > ResponseVerifyVoteExtension_ACCEPT +} + //--------------------------------------------------------------------------- // override JSON marshaling so we emit defaults (ie. disable omitempty) @@ -118,3 +130,25 @@ var _ jsonRoundTripper = (*ResponseDeliverTx)(nil) var _ jsonRoundTripper = (*ResponseCheckTx)(nil) var _ jsonRoundTripper = (*EventAttribute)(nil) + +// ----------------------------------------------- +// construct Result data + +func RespondExtendVote(appDataToSign, appDataSelfAuthenticating []byte) ResponseExtendVote { + return ResponseExtendVote{ + VoteExtension: &types.VoteExtension{ + AppDataToSign: appDataToSign, + AppDataSelfAuthenticating: appDataSelfAuthenticating, + }, + } +} + +func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension { + result := ResponseVerifyVoteExtension_REJECT + if ok { + result = ResponseVerifyVoteExtension_ACCEPT + } + return ResponseVerifyVoteExtension{ + Result: result, + } +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 88fff7b135..25f814ba6a 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -120,7 +120,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29, 0} + return fileDescriptor_252557cfdd89a31a, []int{32, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -157,7 +157,66 @@ 
func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31, 0} + return fileDescriptor_252557cfdd89a31a, []int{34, 0} +} + +type ResponseVerifyVoteExtension_Result int32 + +const ( + ResponseVerifyVoteExtension_UNKNOWN ResponseVerifyVoteExtension_Result = 0 + ResponseVerifyVoteExtension_ACCEPT ResponseVerifyVoteExtension_Result = 1 + ResponseVerifyVoteExtension_SLASH ResponseVerifyVoteExtension_Result = 2 + ResponseVerifyVoteExtension_REJECT ResponseVerifyVoteExtension_Result = 3 +) + +var ResponseVerifyVoteExtension_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "SLASH", + 3: "REJECT", +} + +var ResponseVerifyVoteExtension_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "SLASH": 2, + "REJECT": 3, +} + +func (x ResponseVerifyVoteExtension_Result) String() string { + return proto.EnumName(ResponseVerifyVoteExtension_Result_name, int32(x)) +} + +func (ResponseVerifyVoteExtension_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{37, 0} +} + +type ResponseProcessProposal_Result int32 + +const ( + ResponseProcessProposal_UNKNOWN ResponseProcessProposal_Result = 0 + ResponseProcessProposal_ACCEPT ResponseProcessProposal_Result = 1 + ResponseProcessProposal_REJECT ResponseProcessProposal_Result = 2 +) + +var ResponseProcessProposal_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "REJECT", +} + +var ResponseProcessProposal_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "REJECT": 2, +} + +func (x ResponseProcessProposal_Result) String() string { + return proto.EnumName(ResponseProcessProposal_Result_name, int32(x)) +} + +func (ResponseProcessProposal_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{38, 0} } type Request struct { @@ -176,7 +235,10 @@ type Request struct { // 
*Request_OfferSnapshot // *Request_LoadSnapshotChunk // *Request_ApplySnapshotChunk - // *Request_PreprocessTxs + // *Request_PrepareProposal + // *Request_ProcessProposal + // *Request_ExtendVote + // *Request_VerifyVoteExtension Value isRequest_Value `protobuf_oneof:"value"` } @@ -261,25 +323,37 @@ type Request_LoadSnapshotChunk struct { type Request_ApplySnapshotChunk struct { ApplySnapshotChunk *RequestApplySnapshotChunk `protobuf:"bytes,14,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } -type Request_PreprocessTxs struct { - PreprocessTxs *RequestPreprocessTxs `protobuf:"bytes,15,opt,name=preprocess_txs,json=preprocessTxs,proto3,oneof" json:"preprocess_txs,omitempty"` -} - -func (*Request_Echo) isRequest_Value() {} -func (*Request_Flush) isRequest_Value() {} -func (*Request_Info) isRequest_Value() {} -func (*Request_InitChain) isRequest_Value() {} -func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} -func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} -func (*Request_Commit) isRequest_Value() {} -func (*Request_ListSnapshots) isRequest_Value() {} -func (*Request_OfferSnapshot) isRequest_Value() {} -func (*Request_LoadSnapshotChunk) isRequest_Value() {} -func (*Request_ApplySnapshotChunk) isRequest_Value() {} -func (*Request_PreprocessTxs) isRequest_Value() {} +type Request_PrepareProposal struct { + PrepareProposal *RequestPrepareProposal `protobuf:"bytes,15,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Request_ProcessProposal struct { + ProcessProposal *RequestProcessProposal `protobuf:"bytes,16,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} +type Request_ExtendVote struct { + ExtendVote *RequestExtendVote 
`protobuf:"bytes,17,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` +} +type Request_VerifyVoteExtension struct { + VerifyVoteExtension *RequestVerifyVoteExtension `protobuf:"bytes,18,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` +} + +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} +func (*Request_PrepareProposal) isRequest_Value() {} +func (*Request_ProcessProposal) isRequest_Value() {} +func (*Request_ExtendVote) isRequest_Value() {} +func (*Request_VerifyVoteExtension) isRequest_Value() {} func (m *Request) GetValue() isRequest_Value { if m != nil { @@ -386,9 +460,30 @@ func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { return nil } -func (m *Request) GetPreprocessTxs() *RequestPreprocessTxs { - if x, ok := m.GetValue().(*Request_PreprocessTxs); ok { - return x.PreprocessTxs +func (m *Request) GetPrepareProposal() *RequestPrepareProposal { + if x, ok := m.GetValue().(*Request_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Request) GetProcessProposal() *RequestProcessProposal { + if x, ok := m.GetValue().(*Request_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +func (m *Request) GetExtendVote() *RequestExtendVote { + if x, ok := m.GetValue().(*Request_ExtendVote); ok { + 
return x.ExtendVote + } + return nil +} + +func (m *Request) GetVerifyVoteExtension() *RequestVerifyVoteExtension { + if x, ok := m.GetValue().(*Request_VerifyVoteExtension); ok { + return x.VerifyVoteExtension } return nil } @@ -410,7 +505,10 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_OfferSnapshot)(nil), (*Request_LoadSnapshotChunk)(nil), (*Request_ApplySnapshotChunk)(nil), - (*Request_PreprocessTxs)(nil), + (*Request_PrepareProposal)(nil), + (*Request_ProcessProposal)(nil), + (*Request_ExtendVote)(nil), + (*Request_VerifyVoteExtension)(nil), } } @@ -1170,22 +1268,169 @@ func (m *RequestApplySnapshotChunk) GetSender() string { return "" } -type RequestPreprocessTxs struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` +type RequestPrepareProposal struct { + // block_data is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + // applications can not exceed the size of the data passed to it. + BlockData [][]byte `protobuf:"bytes,1,rep,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` + // If an application decides to populate block_data with extra information, they can not exceed this value. 
+ BlockDataSize int64 `protobuf:"varint,2,opt,name=block_data_size,json=blockDataSize,proto3" json:"block_data_size,omitempty"` } -func (m *RequestPreprocessTxs) Reset() { *m = RequestPreprocessTxs{} } -func (m *RequestPreprocessTxs) String() string { return proto.CompactTextString(m) } -func (*RequestPreprocessTxs) ProtoMessage() {} -func (*RequestPreprocessTxs) Descriptor() ([]byte, []int) { +func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } +func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } +func (*RequestPrepareProposal) ProtoMessage() {} +func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { return fileDescriptor_252557cfdd89a31a, []int{15} } -func (m *RequestPreprocessTxs) XXX_Unmarshal(b []byte) error { +func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPrepareProposal.Merge(m, src) +} +func (m *RequestPrepareProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestPrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo + +func (m *RequestPrepareProposal) GetBlockData() [][]byte { + if m != nil { + return m.BlockData + } + return nil +} + +func (m *RequestPrepareProposal) GetBlockDataSize() int64 { + if m != nil { + return m.BlockDataSize + } + return 0 +} + +// Extends a vote with application-side injection +type RequestExtendVote struct { + Vote *types1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" 
json:"vote,omitempty"` +} + +func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } +func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } +func (*RequestExtendVote) ProtoMessage() {} +func (*RequestExtendVote) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{16} +} +func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestExtendVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestExtendVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestExtendVote.Merge(m, src) +} +func (m *RequestExtendVote) XXX_Size() int { + return m.Size() +} +func (m *RequestExtendVote) XXX_DiscardUnknown() { + xxx_messageInfo_RequestExtendVote.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestExtendVote proto.InternalMessageInfo + +func (m *RequestExtendVote) GetVote() *types1.Vote { + if m != nil { + return m.Vote + } + return nil +} + +// Verify the vote extension +type RequestVerifyVoteExtension struct { + Vote *types1.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` +} + +func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExtension{} } +func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } +func (*RequestVerifyVoteExtension) ProtoMessage() {} +func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{17} +} +func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestVerifyVoteExtension.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestVerifyVoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestVerifyVoteExtension.Merge(m, src) +} +func (m *RequestVerifyVoteExtension) XXX_Size() int { + return m.Size() +} +func (m *RequestVerifyVoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RequestVerifyVoteExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestVerifyVoteExtension proto.InternalMessageInfo + +func (m *RequestVerifyVoteExtension) GetVote() *types1.Vote { + if m != nil { + return m.Vote + } + return nil +} + +type RequestProcessProposal struct { + Header types1.Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` + Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } +func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } +func (*RequestProcessProposal) ProtoMessage() {} +func (*RequestProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{18} +} +func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RequestPreprocessTxs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RequestPreprocessTxs.Marshal(b, m, deterministic) + return xxx_messageInfo_RequestProcessProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1195,19 +1440,26 @@ func (m *RequestPreprocessTxs) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *RequestPreprocessTxs) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPreprocessTxs.Merge(m, src) +func (m *RequestProcessProposal) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestProcessProposal.Merge(m, src) } -func (m *RequestPreprocessTxs) XXX_Size() int { +func (m *RequestProcessProposal) XXX_Size() int { return m.Size() } -func (m *RequestPreprocessTxs) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPreprocessTxs.DiscardUnknown(m) +func (m *RequestProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestProcessProposal.DiscardUnknown(m) } -var xxx_messageInfo_RequestPreprocessTxs proto.InternalMessageInfo +var xxx_messageInfo_RequestProcessProposal proto.InternalMessageInfo + +func (m *RequestProcessProposal) GetHeader() types1.Header { + if m != nil { + return m.Header + } + return types1.Header{} +} -func (m *RequestPreprocessTxs) GetTxs() [][]byte { +func (m *RequestProcessProposal) GetTxs() [][]byte { if m != nil { return m.Txs } @@ -1231,7 +1483,10 @@ type Response struct { // *Response_OfferSnapshot // *Response_LoadSnapshotChunk // *Response_ApplySnapshotChunk - // *Response_PreprocessTxs + // *Response_PrepareProposal + // *Response_ProcessProposal + // *Response_ExtendVote + // *Response_VerifyVoteExtension Value isResponse_Value `protobuf_oneof:"value"` } @@ -1239,7 +1494,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{19} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1319,26 +1574,38 @@ type Response_LoadSnapshotChunk struct { type Response_ApplySnapshotChunk struct { ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } -type Response_PreprocessTxs struct { - PreprocessTxs *ResponsePreprocessTxs 
`protobuf:"bytes,16,opt,name=preprocess_txs,json=preprocessTxs,proto3,oneof" json:"preprocess_txs,omitempty"` -} - -func (*Response_Exception) isResponse_Value() {} -func (*Response_Echo) isResponse_Value() {} -func (*Response_Flush) isResponse_Value() {} -func (*Response_Info) isResponse_Value() {} -func (*Response_InitChain) isResponse_Value() {} -func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} -func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) isResponse_Value() {} -func (*Response_Commit) isResponse_Value() {} -func (*Response_ListSnapshots) isResponse_Value() {} -func (*Response_OfferSnapshot) isResponse_Value() {} -func (*Response_LoadSnapshotChunk) isResponse_Value() {} -func (*Response_ApplySnapshotChunk) isResponse_Value() {} -func (*Response_PreprocessTxs) isResponse_Value() {} +type Response_PrepareProposal struct { + PrepareProposal *ResponsePrepareProposal `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Response_ProcessProposal struct { + ProcessProposal *ResponseProcessProposal `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} +type Response_ExtendVote struct { + ExtendVote *ResponseExtendVote `protobuf:"bytes,18,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` +} +type Response_VerifyVoteExtension struct { + VerifyVoteExtension *ResponseVerifyVoteExtension `protobuf:"bytes,19,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) 
isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} +func (*Response_PrepareProposal) isResponse_Value() {} +func (*Response_ProcessProposal) isResponse_Value() {} +func (*Response_ExtendVote) isResponse_Value() {} +func (*Response_VerifyVoteExtension) isResponse_Value() {} func (m *Response) GetValue() isResponse_Value { if m != nil { @@ -1452,9 +1719,30 @@ func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { return nil } -func (m *Response) GetPreprocessTxs() *ResponsePreprocessTxs { - if x, ok := m.GetValue().(*Response_PreprocessTxs); ok { - return x.PreprocessTxs +func (m *Response) GetPrepareProposal() *ResponsePrepareProposal { + if x, ok := m.GetValue().(*Response_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Response) GetProcessProposal() *ResponseProcessProposal { + if x, ok := m.GetValue().(*Response_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +func (m *Response) GetExtendVote() *ResponseExtendVote { + if x, ok := m.GetValue().(*Response_ExtendVote); ok { + return x.ExtendVote + } + return nil +} + +func (m *Response) GetVerifyVoteExtension() *ResponseVerifyVoteExtension { + if x, ok := m.GetValue().(*Response_VerifyVoteExtension); ok { + return x.VerifyVoteExtension } return nil } @@ -1477,7 +1765,10 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_OfferSnapshot)(nil), (*Response_LoadSnapshotChunk)(nil), (*Response_ApplySnapshotChunk)(nil), - (*Response_PreprocessTxs)(nil), + (*Response_PrepareProposal)(nil), + 
(*Response_ProcessProposal)(nil), + (*Response_ExtendVote)(nil), + (*Response_VerifyVoteExtension)(nil), } } @@ -1490,7 +1781,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} + return fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1534,7 +1825,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{18} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1577,7 +1868,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1619,7 +1910,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} + return fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1693,7 +1984,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return 
proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +2051,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1860,7 +2151,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{26} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1916,7 +2207,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{27} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2037,7 +2328,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} + return 
fileDescriptor_252557cfdd89a31a, []int{28} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2132,7 +2423,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} + return fileDescriptor_252557cfdd89a31a, []int{29} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2192,7 +2483,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} + return fileDescriptor_252557cfdd89a31a, []int{30} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2243,7 +2534,7 @@ func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } func (*ResponseListSnapshots) ProtoMessage() {} func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} + return fileDescriptor_252557cfdd89a31a, []int{31} } func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2287,7 +2578,7 @@ func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } func (*ResponseOfferSnapshot) ProtoMessage() {} func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} + return fileDescriptor_252557cfdd89a31a, []int{32} } func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2331,7 
+2622,7 @@ func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotC func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseLoadSnapshotChunk) ProtoMessage() {} func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} + return fileDescriptor_252557cfdd89a31a, []int{33} } func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2377,7 +2668,7 @@ func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapsho func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } func (*ResponseApplySnapshotChunk) ProtoMessage() {} func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} + return fileDescriptor_252557cfdd89a31a, []int{34} } func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2427,23 +2718,22 @@ func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { return nil } -type ResponsePreprocessTxs struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` - Messages *types1.Messages `protobuf:"bytes,2,opt,name=messages,proto3" json:"messages,omitempty"` +type ResponsePrepareProposal struct { + BlockData [][]byte `protobuf:"bytes,1,rep,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` } -func (m *ResponsePreprocessTxs) Reset() { *m = ResponsePreprocessTxs{} } -func (m *ResponsePreprocessTxs) String() string { return proto.CompactTextString(m) } -func (*ResponsePreprocessTxs) ProtoMessage() {} -func (*ResponsePreprocessTxs) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} +func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } +func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } +func (*ResponsePrepareProposal) ProtoMessage() 
{} +func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{35} } -func (m *ResponsePreprocessTxs) XXX_Unmarshal(b []byte) error { +func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponsePreprocessTxs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponsePreprocessTxs.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2453,49 +2743,41 @@ func (m *ResponsePreprocessTxs) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } -func (m *ResponsePreprocessTxs) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePreprocessTxs.Merge(m, src) +func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) } -func (m *ResponsePreprocessTxs) XXX_Size() int { +func (m *ResponsePrepareProposal) XXX_Size() int { return m.Size() } -func (m *ResponsePreprocessTxs) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePreprocessTxs.DiscardUnknown(m) +func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) } -var xxx_messageInfo_ResponsePreprocessTxs proto.InternalMessageInfo - -func (m *ResponsePreprocessTxs) GetTxs() [][]byte { - if m != nil { - return m.Txs - } - return nil -} +var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo -func (m *ResponsePreprocessTxs) GetMessages() *types1.Messages { +func (m *ResponsePrepareProposal) GetBlockData() [][]byte { if m != nil { - return m.Messages + return m.BlockData } return nil } -type LastCommitInfo struct { - Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` - Votes []VoteInfo 
`protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +type ResponseExtendVote struct { + VoteExtension *types1.VoteExtension `protobuf:"bytes,1,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } -func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } -func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } -func (*LastCommitInfo) ProtoMessage() {} -func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} +func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } +func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } +func (*ResponseExtendVote) ProtoMessage() {} +func (*ResponseExtendVote) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{36} } -func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { +func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_LastCommitInfo.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseExtendVote.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2505,52 +2787,41 @@ func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *LastCommitInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LastCommitInfo.Merge(m, src) +func (m *ResponseExtendVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseExtendVote.Merge(m, src) } -func (m *LastCommitInfo) XXX_Size() int { +func (m *ResponseExtendVote) XXX_Size() int { return m.Size() } -func (m *LastCommitInfo) XXX_DiscardUnknown() { - xxx_messageInfo_LastCommitInfo.DiscardUnknown(m) +func (m *ResponseExtendVote) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResponseExtendVote.DiscardUnknown(m) } -var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo - -func (m *LastCommitInfo) GetRound() int32 { - if m != nil { - return m.Round - } - return 0 -} +var xxx_messageInfo_ResponseExtendVote proto.InternalMessageInfo -func (m *LastCommitInfo) GetVotes() []VoteInfo { +func (m *ResponseExtendVote) GetVoteExtension() *types1.VoteExtension { if m != nil { - return m.Votes + return m.VoteExtension } return nil } -// Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. -// Later, transactions may be queried using these events. -type Event struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` +type ResponseVerifyVoteExtension struct { + Result ResponseVerifyVoteExtension_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseVerifyVoteExtension_Result" json:"result,omitempty"` } -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} +func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteExtension{} } +func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } +func (*ResponseVerifyVoteExtension) ProtoMessage() {} +func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{37} } -func (m *Event) XXX_Unmarshal(b []byte) error { +func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseVerifyVoteExtension) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseVerifyVoteExtension.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2560,46 +2831,198 @@ func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) +func (m *ResponseVerifyVoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseVerifyVoteExtension.Merge(m, src) } -func (m *Event) XXX_Size() int { +func (m *ResponseVerifyVoteExtension) XXX_Size() int { return m.Size() } -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) +func (m *ResponseVerifyVoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseVerifyVoteExtension.DiscardUnknown(m) } -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *Event) GetType() string { - if m != nil { - return m.Type - } - return "" -} +var xxx_messageInfo_ResponseVerifyVoteExtension proto.InternalMessageInfo -func (m *Event) GetAttributes() []EventAttribute { +func (m *ResponseVerifyVoteExtension) GetResult() ResponseVerifyVoteExtension_Result { if m != nil { - return m.Attributes + return m.Result } - return nil + return ResponseVerifyVoteExtension_UNKNOWN } -// EventAttribute is a single key-value pair, associated with an event. 
-type EventAttribute struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +type ResponseProcessProposal struct { + Result ResponseProcessProposal_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseProcessProposal_Result" json:"result,omitempty"` + Evidence [][]byte `protobuf:"bytes,2,rep,name=evidence,proto3" json:"evidence,omitempty"` } -func (m *EventAttribute) Reset() { *m = EventAttribute{} } -func (m *EventAttribute) String() string { return proto.CompactTextString(m) } -func (*EventAttribute) ProtoMessage() {} -func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} +func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } +func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } +func (*ResponseProcessProposal) ProtoMessage() {} +func (*ResponseProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{38} } -func (m *EventAttribute) XXX_Unmarshal(b []byte) error { +func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseProcessProposal.Merge(m, src) +} +func (m *ResponseProcessProposal) XXX_Size() int { + return m.Size() +} +func (m *ResponseProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo + +func (m *ResponseProcessProposal) GetResult() ResponseProcessProposal_Result { + if m != nil { + return m.Result + } + return ResponseProcessProposal_UNKNOWN +} + +func (m *ResponseProcessProposal) GetEvidence() [][]byte { + if m != nil { + return m.Evidence + } + return nil +} + +type LastCommitInfo struct { + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` +} + +func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } +func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } +func (*LastCommitInfo) ProtoMessage() {} +func (*LastCommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{39} +} +func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LastCommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LastCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastCommitInfo.Merge(m, src) +} +func (m *LastCommitInfo) XXX_Size() int { + return m.Size() +} +func (m *LastCommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LastCommitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo + +func (m *LastCommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *LastCommitInfo) GetVotes() []VoteInfo { + if m != nil { + return m.Votes + } + return nil +} + +// Event allows application developers to attach additional information to +// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events. 
+type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{40} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Event) GetAttributes() []EventAttribute { + if m != nil { + return m.Attributes + } + return nil +} + +// EventAttribute is a single key-value pair, associated with an event. 
+type EventAttribute struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *EventAttribute) Reset() { *m = EventAttribute{} } +func (m *EventAttribute) String() string { return proto.CompactTextString(m) } +func (*EventAttribute) ProtoMessage() {} +func (*EventAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{41} +} +func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { @@ -2661,7 +3084,7 @@ func (m *TxResult) Reset() { *m = TxResult{} } func (m *TxResult) String() string { return proto.CompactTextString(m) } func (*TxResult) ProtoMessage() {} func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{42} } func (m *TxResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2729,7 +3152,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{43} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2782,7 +3205,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} + return fileDescriptor_252557cfdd89a31a, []int{44} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-2835,7 +3258,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{45} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2896,7 +3319,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} + return fileDescriptor_252557cfdd89a31a, []int{46} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2972,7 +3395,7 @@ func (m *Snapshot) Reset() { *m = Snapshot{} } func (m *Snapshot) String() string { return proto.CompactTextString(m) } func (*Snapshot) ProtoMessage() {} func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} + return fileDescriptor_252557cfdd89a31a, []int{47} } func (m *Snapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3041,6 +3464,8 @@ func init() { proto.RegisterEnum("tendermint.abci.EvidenceType", EvidenceType_name, EvidenceType_value) proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) + proto.RegisterEnum("tendermint.abci.ResponseVerifyVoteExtension_Result", ResponseVerifyVoteExtension_Result_name, ResponseVerifyVoteExtension_Result_value) + proto.RegisterEnum("tendermint.abci.ResponseProcessProposal_Result", ResponseProcessProposal_Result_name, ResponseProcessProposal_Result_value) proto.RegisterType((*Request)(nil), "tendermint.abci.Request") 
proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.RequestFlush") @@ -3056,7 +3481,10 @@ func init() { proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") - proto.RegisterType((*RequestPreprocessTxs)(nil), "tendermint.abci.RequestPreprocessTxs") + proto.RegisterType((*RequestPrepareProposal)(nil), "tendermint.abci.RequestPrepareProposal") + proto.RegisterType((*RequestExtendVote)(nil), "tendermint.abci.RequestExtendVote") + proto.RegisterType((*RequestVerifyVoteExtension)(nil), "tendermint.abci.RequestVerifyVoteExtension") + proto.RegisterType((*RequestProcessProposal)(nil), "tendermint.abci.RequestProcessProposal") proto.RegisterType((*Response)(nil), "tendermint.abci.Response") proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") @@ -3073,7 +3501,10 @@ func init() { proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") - proto.RegisterType((*ResponsePreprocessTxs)(nil), "tendermint.abci.ResponsePreprocessTxs") + proto.RegisterType((*ResponsePrepareProposal)(nil), "tendermint.abci.ResponsePrepareProposal") + proto.RegisterType((*ResponseExtendVote)(nil), "tendermint.abci.ResponseExtendVote") + proto.RegisterType((*ResponseVerifyVoteExtension)(nil), "tendermint.abci.ResponseVerifyVoteExtension") + proto.RegisterType((*ResponseProcessProposal)(nil), "tendermint.abci.ResponseProcessProposal") 
proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.LastCommitInfo") proto.RegisterType((*Event)(nil), "tendermint.abci.Event") proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") @@ -3088,177 +3519,197 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 2712 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0x23, 0xc5, - 0x15, 0xd7, 0xa7, 0x25, 0x3d, 0x7d, 0x58, 0xdb, 0xbb, 0x2c, 0x62, 0x58, 0xec, 0x65, 0x28, 0xc8, - 0xb2, 0x80, 0x1d, 0x4c, 0xb1, 0x81, 0x22, 0x1f, 0x58, 0x42, 0x1b, 0x99, 0x35, 0x96, 0xd3, 0xd6, - 0x2e, 0x45, 0x12, 0x76, 0x18, 0x69, 0xda, 0xd2, 0xb0, 0xd2, 0xcc, 0x30, 0xd3, 0x32, 0x36, 0xc7, - 0x54, 0x72, 0xa1, 0x52, 0x15, 0x8e, 0xb9, 0xf0, 0x7f, 0xe4, 0x94, 0x53, 0x0e, 0x1c, 0x72, 0xe0, - 0x98, 0x43, 0x8a, 0xa4, 0xd8, 0x5b, 0x6e, 0x39, 0xe5, 0x94, 0xaa, 0x54, 0x7f, 0xcc, 0x97, 0xa4, - 0x91, 0xe4, 0x90, 0x5b, 0x6e, 0xdd, 0x6f, 0xde, 0x7b, 0xea, 0x7e, 0xdd, 0xfd, 0x7b, 0xbf, 0x7e, - 0x2d, 0x78, 0x9a, 0x12, 0xcb, 0x20, 0xee, 0xc4, 0xb4, 0xe8, 0xae, 0xde, 0x1f, 0x98, 0xbb, 0xf4, - 0xc2, 0x21, 0xde, 0x8e, 0xe3, 0xda, 0xd4, 0x46, 0x9b, 0xe1, 0xc7, 0x1d, 0xf6, 0x51, 0x79, 0x26, - 0xa2, 0x3d, 0x70, 0x2f, 0x1c, 0x6a, 0xef, 0x3a, 0xae, 0x6d, 0x9f, 0x0a, 0x7d, 0xe5, 0x46, 0xe4, - 0x33, 0xf7, 0x13, 0xf5, 0x16, 0xfb, 0x2a, 0x8d, 0x1f, 0x91, 0x0b, 0xff, 0xeb, 0x33, 0x73, 0xb6, - 0x8e, 0xee, 0xea, 0x13, 0xff, 0xf3, 0xf6, 0xd0, 0xb6, 0x87, 0x63, 0xb2, 0xcb, 0x7b, 0xfd, 0xe9, - 0xe9, 0x2e, 0x35, 0x27, 0xc4, 0xa3, 0xfa, 0xc4, 0x91, 0x0a, 0xd7, 0x86, 0xf6, 0xd0, 0xe6, 0xcd, - 0x5d, 0xd6, 0x12, 0x52, 0xf5, 0xcb, 0x22, 0x14, 0x30, 0xf9, 0x64, 0x4a, 0x3c, 0x8a, 0xf6, 0x20, - 0x47, 0x06, 0x23, 0xbb, 0x91, 0xbe, 0x99, 0xbe, 0x55, 0xde, 0xbb, 0xb1, 0x33, 0x33, 0xb9, 0x1d, - 0xa9, 0xd7, 0x1e, 0x8c, 0xec, 0x4e, 0x0a, 0x73, 0x5d, 0xf4, 0x3a, 0xe4, 0x4f, 
0xc7, 0x53, 0x6f, - 0xd4, 0xc8, 0x70, 0xa3, 0x67, 0x92, 0x8c, 0xee, 0x32, 0xa5, 0x4e, 0x0a, 0x0b, 0x6d, 0xf6, 0x53, - 0xa6, 0x75, 0x6a, 0x37, 0xb2, 0xcb, 0x7f, 0xea, 0xc0, 0x3a, 0xe5, 0x3f, 0xc5, 0x74, 0x51, 0x13, - 0xc0, 0xb4, 0x4c, 0xaa, 0x0d, 0x46, 0xba, 0x69, 0x35, 0x72, 0xdc, 0xf2, 0xd9, 0x64, 0x4b, 0x93, - 0xb6, 0x98, 0x62, 0x27, 0x85, 0x4b, 0xa6, 0xdf, 0x61, 0xc3, 0xfd, 0x64, 0x4a, 0xdc, 0x8b, 0x46, - 0x7e, 0xf9, 0x70, 0x7f, 0xc6, 0x94, 0xd8, 0x70, 0xb9, 0x36, 0x6a, 0x43, 0xb9, 0x4f, 0x86, 0xa6, - 0xa5, 0xf5, 0xc7, 0xf6, 0xe0, 0x51, 0x63, 0x83, 0x1b, 0xab, 0x49, 0xc6, 0x4d, 0xa6, 0xda, 0x64, - 0x9a, 0x9d, 0x14, 0x86, 0x7e, 0xd0, 0x43, 0x3f, 0x84, 0xe2, 0x60, 0x44, 0x06, 0x8f, 0x34, 0x7a, - 0xde, 0x28, 0x70, 0x1f, 0xdb, 0x49, 0x3e, 0x5a, 0x4c, 0xaf, 0x77, 0xde, 0x49, 0xe1, 0xc2, 0x40, - 0x34, 0xd9, 0xfc, 0x0d, 0x32, 0x36, 0xcf, 0x88, 0xcb, 0xec, 0x8b, 0xcb, 0xe7, 0xff, 0x8e, 0xd0, - 0xe4, 0x1e, 0x4a, 0x86, 0xdf, 0x41, 0x3f, 0x81, 0x12, 0xb1, 0x0c, 0x39, 0x8d, 0x12, 0x77, 0x71, - 0x33, 0x71, 0x9d, 0x2d, 0xc3, 0x9f, 0x44, 0x91, 0xc8, 0x36, 0x7a, 0x03, 0x36, 0x06, 0xf6, 0x64, - 0x62, 0xd2, 0x06, 0x70, 0xeb, 0xad, 0xc4, 0x09, 0x70, 0xad, 0x4e, 0x0a, 0x4b, 0x7d, 0x74, 0x04, - 0xb5, 0xb1, 0xe9, 0x51, 0xcd, 0xb3, 0x74, 0xc7, 0x1b, 0xd9, 0xd4, 0x6b, 0x94, 0xb9, 0x87, 0xe7, - 0x93, 0x3c, 0x1c, 0x9a, 0x1e, 0x3d, 0xf1, 0x95, 0x3b, 0x29, 0x5c, 0x1d, 0x47, 0x05, 0xcc, 0x9f, - 0x7d, 0x7a, 0x4a, 0xdc, 0xc0, 0x61, 0xa3, 0xb2, 0xdc, 0x5f, 0x97, 0x69, 0xfb, 0xf6, 0xcc, 0x9f, - 0x1d, 0x15, 0xa0, 0x5f, 0xc0, 0xd5, 0xb1, 0xad, 0x1b, 0x81, 0x3b, 0x6d, 0x30, 0x9a, 0x5a, 0x8f, - 0x1a, 0x55, 0xee, 0xf4, 0xc5, 0xc4, 0x41, 0xda, 0xba, 0xe1, 0xbb, 0x68, 0x31, 0x83, 0x4e, 0x0a, - 0x5f, 0x19, 0xcf, 0x0a, 0xd1, 0x43, 0xb8, 0xa6, 0x3b, 0xce, 0xf8, 0x62, 0xd6, 0x7b, 0x8d, 0x7b, - 0xbf, 0x9d, 0xe4, 0x7d, 0x9f, 0xd9, 0xcc, 0xba, 0x47, 0xfa, 0x9c, 0x94, 0x05, 0xc3, 0x71, 0x89, - 0xe3, 0xda, 0x03, 0xe2, 0x79, 0x1a, 0x3d, 0xf7, 0x1a, 0x9b, 0xcb, 0x83, 0x71, 0x1c, 0x68, 0xf7, - 0xce, 0x79, 0x70, 
0x9d, 0xa8, 0xa0, 0x59, 0x80, 0xfc, 0x99, 0x3e, 0x9e, 0x12, 0xf5, 0x7b, 0x50, - 0x8e, 0x1c, 0x7b, 0xd4, 0x80, 0xc2, 0x84, 0x78, 0x9e, 0x3e, 0x24, 0x1c, 0x25, 0x4a, 0xd8, 0xef, - 0xaa, 0x35, 0xa8, 0x44, 0x8f, 0xba, 0xfa, 0x45, 0x3a, 0xb0, 0x64, 0xa7, 0x98, 0x59, 0x9e, 0x11, - 0xd7, 0x33, 0x6d, 0xcb, 0xb7, 0x94, 0x5d, 0xf4, 0x1c, 0x54, 0xf9, 0x7e, 0xd4, 0xfc, 0xef, 0x0c, - 0x4a, 0x72, 0xb8, 0xc2, 0x85, 0x0f, 0xa4, 0xd2, 0x36, 0x94, 0x9d, 0x3d, 0x27, 0x50, 0xc9, 0x72, - 0x15, 0x70, 0xf6, 0x1c, 0x5f, 0xe1, 0x59, 0xa8, 0xb0, 0xf9, 0x05, 0x1a, 0x39, 0xfe, 0x23, 0x65, - 0x26, 0x93, 0x2a, 0xea, 0x9f, 0x33, 0x50, 0x9f, 0x85, 0x07, 0xf4, 0x06, 0xe4, 0x18, 0x52, 0x4a, - 0xd0, 0x53, 0x76, 0x04, 0x8c, 0xee, 0xf8, 0x30, 0xba, 0xd3, 0xf3, 0x61, 0xb4, 0x59, 0xfc, 0xea, - 0x9b, 0xed, 0xd4, 0x17, 0x7f, 0xdb, 0x4e, 0x63, 0x6e, 0x81, 0x9e, 0x62, 0xa7, 0x59, 0x37, 0x2d, - 0xcd, 0x34, 0xf8, 0x90, 0x4b, 0xec, 0xa8, 0xea, 0xa6, 0x75, 0x60, 0xa0, 0x43, 0xa8, 0x0f, 0x6c, - 0xcb, 0x23, 0x96, 0x37, 0xf5, 0x34, 0x01, 0xd3, 0x12, 0xea, 0x62, 0x07, 0x56, 0x80, 0x7f, 0xcb, - 0xd7, 0x3c, 0xe6, 0x8a, 0x78, 0x73, 0x10, 0x17, 0xa0, 0xbb, 0x00, 0x67, 0xfa, 0xd8, 0x34, 0x74, - 0x6a, 0xbb, 0x5e, 0x23, 0x77, 0x33, 0xbb, 0xf0, 0xd4, 0x3e, 0xf0, 0x55, 0xee, 0x3b, 0x86, 0x4e, - 0x49, 0x33, 0xc7, 0x86, 0x8b, 0x23, 0x96, 0xe8, 0x05, 0xd8, 0xd4, 0x1d, 0x47, 0xf3, 0xa8, 0x4e, - 0x89, 0xd6, 0xbf, 0xa0, 0xc4, 0xe3, 0x30, 0x58, 0xc1, 0x55, 0xdd, 0x71, 0x4e, 0x98, 0xb4, 0xc9, - 0x84, 0xe8, 0x79, 0xa8, 0x31, 0xc4, 0x34, 0xf5, 0xb1, 0x36, 0x22, 0xe6, 0x70, 0x44, 0x39, 0xe0, - 0x65, 0x71, 0x55, 0x4a, 0x3b, 0x5c, 0xa8, 0x1a, 0xc1, 0x8a, 0x73, 0xb4, 0x44, 0x08, 0x72, 0x86, - 0x4e, 0x75, 0x1e, 0xc9, 0x0a, 0xe6, 0x6d, 0x26, 0x73, 0x74, 0x3a, 0x92, 0xf1, 0xe1, 0x6d, 0x74, - 0x1d, 0x36, 0xa4, 0xdb, 0x2c, 0x77, 0x2b, 0x7b, 0xe8, 0x1a, 0xe4, 0x1d, 0xd7, 0x3e, 0x23, 0x7c, - 0xe9, 0x8a, 0x58, 0x74, 0xd4, 0x5f, 0x67, 0xe0, 0xca, 0x1c, 0xae, 0x32, 0xbf, 0x23, 0xdd, 0x1b, - 0xf9, 0xbf, 0xc5, 0xda, 0xe8, 0x0e, 0xf3, 0xab, 0x1b, 
0xc4, 0x95, 0xb9, 0xa8, 0x31, 0x1f, 0xea, - 0x0e, 0xff, 0x2e, 0x43, 0x23, 0xb5, 0x51, 0x17, 0xea, 0x63, 0xdd, 0xa3, 0x9a, 0xc0, 0x29, 0x2d, - 0x92, 0x97, 0xe6, 0xd1, 0xf9, 0x50, 0xf7, 0x91, 0x8d, 0x6d, 0x6a, 0xe9, 0xa8, 0x36, 0x8e, 0x49, - 0x11, 0x86, 0x6b, 0xfd, 0x8b, 0xcf, 0x74, 0x8b, 0x9a, 0x16, 0xd1, 0xe6, 0x56, 0xee, 0xa9, 0x39, - 0xa7, 0xed, 0x33, 0xd3, 0x20, 0xd6, 0xc0, 0x5f, 0xb2, 0xab, 0x81, 0x71, 0xb0, 0xa4, 0x9e, 0x8a, - 0xa1, 0x16, 0xcf, 0x0c, 0xa8, 0x06, 0x19, 0x7a, 0x2e, 0x03, 0x90, 0xa1, 0xe7, 0xe8, 0xfb, 0x90, - 0x63, 0x93, 0xe4, 0x93, 0xaf, 0x2d, 0x48, 0xa9, 0xd2, 0xae, 0x77, 0xe1, 0x10, 0xcc, 0x35, 0x55, - 0x35, 0x38, 0x0e, 0x41, 0xb6, 0x98, 0xf5, 0xaa, 0xbe, 0x08, 0x9b, 0x33, 0xe9, 0x20, 0xb2, 0x7e, - 0xe9, 0xe8, 0xfa, 0xa9, 0x9b, 0x50, 0x8d, 0x61, 0xbf, 0x7a, 0x1d, 0xae, 0x2d, 0x82, 0x72, 0x75, - 0x14, 0xc8, 0x63, 0x90, 0x8c, 0x5e, 0x87, 0x62, 0x80, 0xe5, 0xe2, 0x38, 0xce, 0xc7, 0xca, 0x57, - 0xc6, 0x81, 0x2a, 0x3b, 0x87, 0x6c, 0x5b, 0xf3, 0xfd, 0x90, 0xe1, 0x03, 0x2f, 0xe8, 0x8e, 0xd3, - 0xd1, 0xbd, 0x91, 0xfa, 0x11, 0x34, 0x92, 0x70, 0x7a, 0x66, 0x1a, 0xb9, 0x60, 0x1b, 0x5e, 0x87, - 0x8d, 0x53, 0xdb, 0x9d, 0xe8, 0x94, 0x3b, 0xab, 0x62, 0xd9, 0x63, 0xdb, 0x53, 0x60, 0x76, 0x96, - 0x8b, 0x45, 0x47, 0xd5, 0xe0, 0xa9, 0x44, 0xac, 0x66, 0x26, 0xa6, 0x65, 0x10, 0x11, 0xcf, 0x2a, - 0x16, 0x9d, 0xd0, 0x91, 0x18, 0xac, 0xe8, 0xb0, 0x9f, 0xf5, 0xf8, 0x5c, 0xb9, 0xff, 0x12, 0x96, - 0x3d, 0xf5, 0x56, 0x10, 0xac, 0x18, 0x64, 0xa3, 0x3a, 0x64, 0x19, 0xcc, 0xa7, 0x6f, 0x66, 0x6f, - 0x55, 0x30, 0x6b, 0xaa, 0xff, 0x2c, 0x42, 0x11, 0x13, 0xcf, 0x61, 0xe8, 0x81, 0x9a, 0x50, 0x22, - 0xe7, 0x03, 0xe2, 0x50, 0x1f, 0x70, 0x17, 0xf3, 0x15, 0xa1, 0xdd, 0xf6, 0x35, 0x19, 0x59, 0x08, - 0xcc, 0xd0, 0x6b, 0x92, 0x0f, 0x26, 0x53, 0x3b, 0x69, 0x1e, 0x25, 0x84, 0x77, 0x7c, 0x42, 0x98, - 0x4d, 0xe4, 0x07, 0xc2, 0x6a, 0x86, 0x11, 0xbe, 0x26, 0x19, 0x61, 0x6e, 0xc5, 0x8f, 0xc5, 0x28, - 0x61, 0x2b, 0x46, 0x09, 0xf3, 0x2b, 0xa6, 0x99, 0xc0, 0x09, 0xef, 0xf8, 0x9c, 0x70, 0x63, 
0xc5, - 0x88, 0x67, 0x48, 0xe1, 0xdd, 0x38, 0x29, 0x14, 0x84, 0xee, 0xb9, 0x44, 0xeb, 0x44, 0x56, 0xf8, - 0xa3, 0x08, 0x2b, 0x2c, 0x26, 0x52, 0x32, 0xe1, 0x64, 0x01, 0x2d, 0x6c, 0xc5, 0x68, 0x61, 0x69, - 0x45, 0x0c, 0x12, 0x78, 0xe1, 0xdb, 0x51, 0x5e, 0x08, 0x89, 0xd4, 0x52, 0xae, 0xf7, 0x22, 0x62, - 0xf8, 0x66, 0x40, 0x0c, 0xcb, 0x89, 0xcc, 0x56, 0xce, 0x61, 0x96, 0x19, 0x76, 0xe7, 0x98, 0xa1, - 0x60, 0x72, 0x2f, 0x24, 0xba, 0x58, 0x41, 0x0d, 0xbb, 0x73, 0xd4, 0xb0, 0xba, 0xc2, 0xe1, 0x0a, - 0x6e, 0xf8, 0xcb, 0xc5, 0xdc, 0x30, 0x99, 0xbd, 0xc9, 0x61, 0xae, 0x47, 0x0e, 0xb5, 0x04, 0x72, - 0x28, 0x28, 0xdc, 0x4b, 0x89, 0xee, 0xd7, 0x66, 0x87, 0xdd, 0x39, 0x76, 0x58, 0x5f, 0x11, 0x8f, - 0x75, 0xe9, 0xe1, 0x8b, 0x2c, 0x39, 0xcf, 0x80, 0x08, 0x03, 0x38, 0xe2, 0xba, 0xb6, 0x2b, 0x89, - 0x9e, 0xe8, 0xa8, 0xb7, 0x18, 0x5d, 0x08, 0x01, 0x63, 0x09, 0x95, 0xe4, 0x89, 0x24, 0x02, 0x12, - 0xea, 0x1f, 0xd2, 0xa1, 0x2d, 0xcf, 0xb0, 0x51, 0xaa, 0x51, 0x92, 0x54, 0x23, 0x42, 0x30, 0x33, - 0x71, 0x82, 0xb9, 0x0d, 0x65, 0x96, 0x20, 0x66, 0xb8, 0xa3, 0xee, 0x04, 0xdc, 0xf1, 0x36, 0x5c, - 0xe1, 0x0c, 0x40, 0xd0, 0x50, 0x99, 0x15, 0x72, 0x3c, 0xb9, 0x6d, 0xb2, 0x0f, 0x62, 0xb7, 0x8b, - 0xf4, 0xf0, 0x0a, 0x5c, 0x8d, 0xe8, 0x06, 0x89, 0x47, 0x10, 0xa9, 0x7a, 0xa0, 0xbd, 0x2f, 0x33, - 0xd0, 0x9f, 0xd2, 0x61, 0x84, 0x42, 0xd2, 0xb9, 0x88, 0x1f, 0xa6, 0xff, 0x47, 0xfc, 0x30, 0xf3, - 0x5f, 0xf3, 0xc3, 0x68, 0x22, 0xcd, 0xc6, 0x13, 0xe9, 0xbf, 0xd2, 0xe1, 0x9a, 0x04, 0x6c, 0x6f, - 0x60, 0x1b, 0x44, 0xa6, 0x36, 0xde, 0x66, 0x39, 0x69, 0x6c, 0x0f, 0x65, 0x02, 0x63, 0x4d, 0xa6, - 0x15, 0xa0, 0x7a, 0x49, 0x82, 0x76, 0x90, 0x15, 0xf3, 0x3c, 0xc2, 0x32, 0x2b, 0xd6, 0x21, 0xfb, - 0x88, 0x08, 0x0c, 0xae, 0x60, 0xd6, 0x64, 0x7a, 0x7c, 0x93, 0x71, 0x64, 0xad, 0x60, 0xd1, 0x41, - 0x6f, 0x40, 0x89, 0x57, 0x54, 0x34, 0xdb, 0xf1, 0x24, 0x5c, 0x3e, 0x1d, 0x9d, 0xab, 0x28, 0x9c, - 0xec, 0x1c, 0x33, 0x9d, 0xae, 0xe3, 0xe1, 0xa2, 0x23, 0x5b, 0x91, 0x84, 0x5f, 0x8a, 0xf1, 0xce, - 0x1b, 0x50, 0x62, 0xa3, 0xf7, 
0x1c, 0x7d, 0x40, 0x38, 0xf6, 0x95, 0x70, 0x28, 0x50, 0x1f, 0x02, - 0x9a, 0x47, 0x70, 0xd4, 0x81, 0x0d, 0x72, 0x46, 0x2c, 0x2a, 0x12, 0x70, 0x79, 0xef, 0xfa, 0x02, - 0x52, 0x47, 0x2c, 0xda, 0x6c, 0xb0, 0x20, 0xff, 0xe3, 0x9b, 0xed, 0xba, 0xd0, 0x7e, 0xd9, 0x9e, - 0x98, 0x94, 0x4c, 0x1c, 0x7a, 0x81, 0xa5, 0xbd, 0xfa, 0xd7, 0x0c, 0x63, 0x58, 0x31, 0x74, 0x5f, - 0x18, 0x5b, 0x7f, 0xcb, 0x67, 0x22, 0xec, 0x7a, 0xbd, 0x78, 0x6f, 0x01, 0x0c, 0x75, 0x4f, 0xfb, - 0x54, 0xb7, 0x28, 0x31, 0x64, 0xd0, 0x23, 0x12, 0xa4, 0x40, 0x91, 0xf5, 0xa6, 0x1e, 0x31, 0x24, - 0xd1, 0x0f, 0xfa, 0x91, 0x79, 0x16, 0xbe, 0xdb, 0x3c, 0xe3, 0x51, 0x2e, 0xce, 0x44, 0x39, 0xc2, - 0x7e, 0x4a, 0x51, 0xf6, 0xc3, 0xc6, 0xe6, 0xb8, 0xa6, 0xed, 0x9a, 0xf4, 0x82, 0x2f, 0x4d, 0x16, - 0x07, 0x7d, 0x76, 0x6f, 0x9c, 0x90, 0x89, 0x63, 0xdb, 0x63, 0x4d, 0xc0, 0x4d, 0x99, 0x9b, 0x56, - 0xa4, 0xb0, 0xcd, 0x51, 0xe7, 0x37, 0x99, 0xf0, 0xfc, 0x85, 0x2c, 0xf7, 0xff, 0x2e, 0xc0, 0xea, - 0x6f, 0xf9, 0xdd, 0x37, 0x9e, 0xbf, 0xd1, 0x09, 0x5c, 0x09, 0x8e, 0xbf, 0x36, 0xe5, 0xb0, 0xe0, - 0x6f, 0xe8, 0x75, 0xf1, 0xa3, 0x7e, 0x16, 0x17, 0x7b, 0xe8, 0x03, 0x78, 0x72, 0x06, 0xdb, 0x02, - 0xd7, 0x99, 0x75, 0x21, 0xee, 0x89, 0x38, 0xc4, 0xf9, 0xae, 0xc3, 0x60, 0x65, 0xbf, 0xe3, 0xa9, - 0x3b, 0x60, 0xd7, 0xa9, 0x28, 0x1d, 0x59, 0xb8, 0xfc, 0xcf, 0x41, 0xd5, 0x25, 0x94, 0x5d, 0xf1, - 0x63, 0x17, 0xd6, 0x8a, 0x10, 0xca, 0x6b, 0xf0, 0x31, 0x3c, 0xb1, 0x90, 0x96, 0xa0, 0x1f, 0x40, - 0x29, 0x64, 0x34, 0xe9, 0x84, 0xbb, 0x5f, 0x70, 0x9f, 0x09, 0x75, 0xd5, 0x3f, 0xa6, 0x43, 0x97, - 0xf1, 0x1b, 0x52, 0x1b, 0x36, 0x5c, 0xe2, 0x4d, 0xc7, 0xe2, 0xce, 0x52, 0xdb, 0x7b, 0x65, 0x3d, - 0x42, 0xc3, 0xa4, 0xd3, 0x31, 0xc5, 0xd2, 0x58, 0x7d, 0x08, 0x1b, 0x42, 0x82, 0xca, 0x50, 0xb8, - 0x7f, 0x74, 0xef, 0xa8, 0xfb, 0xfe, 0x51, 0x3d, 0x85, 0x00, 0x36, 0xf6, 0x5b, 0xad, 0xf6, 0x71, - 0xaf, 0x9e, 0x46, 0x25, 0xc8, 0xef, 0x37, 0xbb, 0xb8, 0x57, 0xcf, 0x30, 0x31, 0x6e, 0xbf, 0xdb, - 0x6e, 0xf5, 0xea, 0x59, 0x74, 0x05, 0xaa, 0xa2, 0xad, 0xdd, 0xed, 
0xe2, 0xf7, 0xf6, 0x7b, 0xf5, - 0x5c, 0x44, 0x74, 0xd2, 0x3e, 0x7a, 0xa7, 0x8d, 0xeb, 0x79, 0xf5, 0x55, 0x76, 0x29, 0x4a, 0xa0, - 0x40, 0xe1, 0xf5, 0x27, 0x1d, 0xb9, 0xfe, 0xa8, 0xbf, 0xcf, 0x80, 0x92, 0xcc, 0x6b, 0xd0, 0xbb, - 0x33, 0x13, 0xdf, 0xbb, 0x04, 0x29, 0x9a, 0x99, 0x3d, 0x7a, 0x1e, 0x6a, 0x2e, 0x39, 0x25, 0x74, - 0x30, 0x12, 0x3c, 0x4b, 0xa4, 0xcc, 0x2a, 0xae, 0x4a, 0x29, 0x37, 0xf2, 0x84, 0xda, 0xc7, 0x64, - 0x40, 0x35, 0x81, 0x45, 0x62, 0xd3, 0x95, 0x98, 0x1a, 0x93, 0x9e, 0x08, 0xa1, 0xfa, 0xd1, 0xa5, - 0x62, 0x59, 0x82, 0x3c, 0x6e, 0xf7, 0xf0, 0x07, 0xf5, 0x2c, 0x42, 0x50, 0xe3, 0x4d, 0xed, 0xe4, - 0x68, 0xff, 0xf8, 0xa4, 0xd3, 0x65, 0xb1, 0xbc, 0x0a, 0x9b, 0x7e, 0x2c, 0x7d, 0x61, 0x5e, 0xd5, - 0xc3, 0xdd, 0xb0, 0xe2, 0x0a, 0x88, 0xee, 0x40, 0x51, 0x92, 0x28, 0xff, 0xb0, 0x29, 0xf3, 0x87, - 0xed, 0x3d, 0xa9, 0x81, 0x03, 0x5d, 0xf5, 0x43, 0xa8, 0xc5, 0x2b, 0x1b, 0x6c, 0x95, 0x5c, 0x7b, - 0x6a, 0x19, 0x3c, 0xde, 0x79, 0x2c, 0x3a, 0xe8, 0x75, 0xc8, 0x9f, 0xd9, 0xe2, 0x24, 0x2f, 0xde, - 0xce, 0x0f, 0x6c, 0x4a, 0x22, 0x95, 0x11, 0xa1, 0xad, 0x7e, 0x06, 0x79, 0x7e, 0x30, 0xd9, 0x21, - 0xe3, 0x35, 0x0a, 0xc9, 0xdb, 0x58, 0x1b, 0x7d, 0x08, 0xa0, 0x53, 0xea, 0x9a, 0xfd, 0x69, 0xe8, - 0x78, 0x7b, 0xf1, 0xc1, 0xde, 0xf7, 0xf5, 0x9a, 0x37, 0xe4, 0x09, 0xbf, 0x16, 0x9a, 0x46, 0x4e, - 0x79, 0xc4, 0xa1, 0x7a, 0x04, 0xb5, 0xb8, 0xad, 0xcf, 0x34, 0xc4, 0x18, 0xe2, 0x4c, 0x43, 0x10, - 0x47, 0xc9, 0x34, 0x02, 0x9e, 0x92, 0x15, 0xf5, 0x28, 0xde, 0x51, 0x3f, 0x4f, 0x43, 0xb1, 0x77, - 0x2e, 0x97, 0x3c, 0xa1, 0x14, 0x12, 0x9a, 0x66, 0xa2, 0x17, 0x7f, 0x51, 0x5b, 0xc9, 0x06, 0x15, - 0x9b, 0xb7, 0x83, 0x4d, 0x9d, 0x5b, 0xf7, 0xd6, 0xe6, 0x97, 0xae, 0xe4, 0x41, 0x7e, 0x0b, 0x4a, - 0x01, 0x2c, 0x33, 0x02, 0xac, 0x1b, 0x86, 0x4b, 0x3c, 0x4f, 0x1e, 0x2d, 0xbf, 0xcb, 0x2b, 0x6b, - 0xf6, 0xa7, 0xb2, 0xb4, 0x90, 0xc5, 0xa2, 0xa3, 0x1a, 0xb0, 0x39, 0x83, 0xe9, 0xe8, 0x2d, 0x28, - 0x38, 0xd3, 0xbe, 0xe6, 0x87, 0x67, 0xe6, 0x65, 0xc6, 0xa7, 0x56, 0xd3, 0xfe, 0xd8, 0x1c, 0xdc, - 0x23, 
0x17, 0xfe, 0x60, 0x9c, 0x69, 0xff, 0x9e, 0x88, 0xa2, 0xf8, 0x95, 0x4c, 0xf4, 0x57, 0xce, - 0xa0, 0xe8, 0x6f, 0x0a, 0xf4, 0x63, 0x28, 0x05, 0xe9, 0x22, 0x28, 0xb8, 0x26, 0xe6, 0x19, 0xe9, - 0x3e, 0x34, 0x61, 0x3c, 0xdd, 0x33, 0x87, 0x16, 0x31, 0xb4, 0x90, 0x82, 0xf3, 0x5f, 0x2b, 0xe2, - 0x4d, 0xf1, 0xe1, 0xd0, 0xe7, 0xdf, 0xea, 0xbf, 0xd3, 0x50, 0xf4, 0x0b, 0x6b, 0xe8, 0xd5, 0xc8, - 0xbe, 0xab, 0x2d, 0x28, 0x2e, 0xf8, 0x8a, 0x61, 0x71, 0x2c, 0x3e, 0xd6, 0xcc, 0xe5, 0xc7, 0x9a, - 0x54, 0xe5, 0xf4, 0xeb, 0xcd, 0xb9, 0x4b, 0xd7, 0x9b, 0x5f, 0x06, 0x44, 0x6d, 0xaa, 0x8f, 0xb5, - 0x33, 0x9b, 0x9a, 0xd6, 0x50, 0x13, 0xc1, 0x16, 0x74, 0xa3, 0xce, 0xbf, 0x3c, 0xe0, 0x1f, 0x8e, - 0x79, 0xdc, 0x7f, 0x95, 0x86, 0x62, 0x90, 0x37, 0x2e, 0x5b, 0xeb, 0xba, 0x0e, 0x1b, 0x12, 0x1a, - 0x45, 0xb1, 0x4b, 0xf6, 0x82, 0xb2, 0x6b, 0x2e, 0x52, 0x76, 0x55, 0x18, 0xe6, 0x50, 0x9d, 0x27, - 0x4f, 0x71, 0x0b, 0x0a, 0xfa, 0xb7, 0xdf, 0x84, 0x72, 0xa4, 0xec, 0xc8, 0x4e, 0xde, 0x51, 0xfb, - 0xfd, 0x7a, 0x4a, 0x29, 0x7c, 0xfe, 0xe5, 0xcd, 0xec, 0x11, 0xf9, 0x94, 0xed, 0x59, 0xdc, 0x6e, - 0x75, 0xda, 0xad, 0x7b, 0xf5, 0xb4, 0x52, 0xfe, 0xfc, 0xcb, 0x9b, 0x05, 0x4c, 0x78, 0x61, 0xe3, - 0x76, 0x07, 0x2a, 0xd1, 0x55, 0x89, 0xa3, 0x2b, 0x82, 0xda, 0x3b, 0xf7, 0x8f, 0x0f, 0x0f, 0x5a, - 0xfb, 0xbd, 0xb6, 0xf6, 0xa0, 0xdb, 0x6b, 0xd7, 0xd3, 0xe8, 0x49, 0xb8, 0x7a, 0x78, 0xf0, 0xd3, - 0x4e, 0x4f, 0x6b, 0x1d, 0x1e, 0xb4, 0x8f, 0x7a, 0xda, 0x7e, 0xaf, 0xb7, 0xdf, 0xba, 0x57, 0xcf, - 0xec, 0xfd, 0x0e, 0x60, 0x73, 0xbf, 0xd9, 0x3a, 0x60, 0x99, 0xc1, 0x1c, 0xe8, 0xfc, 0x8a, 0xda, - 0x82, 0x1c, 0xbf, 0x84, 0x2e, 0x7d, 0xe4, 0x54, 0x96, 0x97, 0xbc, 0xd0, 0x5d, 0xc8, 0xf3, 0xfb, - 0x29, 0x5a, 0xfe, 0xea, 0xa9, 0xac, 0xa8, 0x81, 0xb1, 0xc1, 0xf0, 0xe3, 0xb1, 0xf4, 0x19, 0x54, - 0x59, 0x5e, 0x12, 0x43, 0x18, 0x4a, 0x21, 0xbf, 0x5d, 0xfd, 0x2c, 0xa8, 0xac, 0x01, 0x36, 0xe8, - 0x10, 0x0a, 0xfe, 0x95, 0x64, 0xd5, 0x43, 0xa5, 0xb2, 0xb2, 0x66, 0xc5, 0xc2, 0x25, 0xae, 0x8e, - 0xcb, 0x5f, 0x5d, 0x95, 0x15, 0x05, 0x38, 
0x74, 0x00, 0x1b, 0x92, 0xb3, 0xad, 0x78, 0x7c, 0x54, - 0x56, 0xd5, 0xa0, 0x58, 0xd0, 0xc2, 0x4b, 0xf9, 0xea, 0xb7, 0x64, 0x65, 0x8d, 0xda, 0x22, 0xba, - 0x0f, 0x10, 0xb9, 0x28, 0xae, 0xf1, 0x48, 0xac, 0xac, 0x53, 0x33, 0x44, 0x5d, 0x28, 0x06, 0xbc, - 0x7d, 0xe5, 0x93, 0xad, 0xb2, 0xba, 0x78, 0x87, 0x1e, 0x42, 0x35, 0xce, 0x57, 0xd7, 0x7b, 0x88, - 0x55, 0xd6, 0xac, 0xca, 0x31, 0xff, 0x71, 0xf2, 0xba, 0xde, 0xc3, 0xac, 0xb2, 0x66, 0x91, 0x0e, - 0x7d, 0x0c, 0x57, 0xe6, 0xc9, 0xe5, 0xfa, 0xef, 0xb4, 0xca, 0x25, 0xca, 0x76, 0x68, 0x02, 0x68, - 0x01, 0x29, 0xbd, 0xc4, 0xb3, 0xad, 0x72, 0x99, 0x2a, 0x1e, 0x0b, 0x5d, 0x9c, 0xe9, 0xad, 0xf7, - 0x8c, 0xab, 0xac, 0x59, 0xcf, 0x6b, 0xb6, 0xbf, 0xfa, 0x76, 0x2b, 0xfd, 0xf5, 0xb7, 0x5b, 0xe9, - 0xbf, 0x7f, 0xbb, 0x95, 0xfe, 0xe2, 0xf1, 0x56, 0xea, 0xeb, 0xc7, 0x5b, 0xa9, 0xbf, 0x3c, 0xde, - 0x4a, 0xfd, 0xfc, 0xa5, 0xa1, 0x49, 0x47, 0xd3, 0xfe, 0xce, 0xc0, 0x9e, 0xec, 0x46, 0xff, 0x6f, - 0xb2, 0xe8, 0x3f, 0x30, 0xfd, 0x0d, 0x9e, 0xb4, 0x5e, 0xfb, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x4a, 0xe7, 0x9d, 0x11, 0x23, 0x23, 0x00, 0x00, + // 3029 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0x23, 0xd5, + 0xb5, 0xd7, 0xa7, 0x2d, 0x1d, 0xeb, 0xcb, 0xd7, 0xc3, 0x20, 0x9a, 0x19, 0x7b, 0xe8, 0x29, 0x60, + 0x18, 0xc0, 0x7e, 0x78, 0x0a, 0xde, 0x50, 0xbc, 0xf7, 0xc0, 0xd2, 0xc8, 0xc8, 0x8c, 0x9f, 0xed, + 0x5c, 0x6b, 0x86, 0x22, 0x81, 0x69, 0x5a, 0xd2, 0xb5, 0xd5, 0x8c, 0xd4, 0xdd, 0x74, 0xb7, 0x84, + 0x3d, 0xcb, 0x54, 0xb2, 0xa1, 0xb2, 0x60, 0x99, 0x0d, 0xab, 0x64, 0x9b, 0x7d, 0x56, 0x59, 0xb1, + 0x60, 0x91, 0x05, 0xcb, 0x2c, 0x52, 0x24, 0x05, 0xbb, 0xfc, 0x03, 0xa9, 0x4a, 0x55, 0xaa, 0x52, + 0xf7, 0xa3, 0x3f, 0xa5, 0x96, 0x5a, 0x90, 0x5d, 0x76, 0x7d, 0x4f, 0x9f, 0x73, 0xfa, 0x7e, 0x9e, + 0xf3, 0xfb, 0x9d, 0xbe, 0xf0, 0xac, 0x43, 0xf4, 0x3e, 0xb1, 0x46, 0x9a, 0xee, 0xec, 0xa8, 0xdd, + 0x9e, 0xb6, 0xe3, 0x5c, 0x9a, 0xc4, 0xde, 0x36, 0x2d, 0xc3, 0x31, 0x50, 0xd5, 
0x7f, 0xb9, 0x4d, + 0x5f, 0x4a, 0xd7, 0x03, 0xda, 0x3d, 0xeb, 0xd2, 0x74, 0x8c, 0x1d, 0xd3, 0x32, 0x8c, 0x33, 0xae, + 0x2f, 0x5d, 0x0b, 0xbc, 0x66, 0x7e, 0x82, 0xde, 0x42, 0x6f, 0x85, 0xf1, 0x63, 0x72, 0xe9, 0xbe, + 0xbd, 0x3e, 0x65, 0x6b, 0xaa, 0x96, 0x3a, 0x72, 0x5f, 0x6f, 0x9d, 0x1b, 0xc6, 0xf9, 0x90, 0xec, + 0xb0, 0x56, 0x77, 0x7c, 0xb6, 0xe3, 0x68, 0x23, 0x62, 0x3b, 0xea, 0xc8, 0x14, 0x0a, 0x57, 0xce, + 0x8d, 0x73, 0x83, 0x3d, 0xee, 0xd0, 0x27, 0x2e, 0x95, 0x7f, 0x03, 0xb0, 0x8a, 0xc9, 0xa7, 0x63, + 0x62, 0x3b, 0x68, 0x17, 0x72, 0xa4, 0x37, 0x30, 0xea, 0xe9, 0x1b, 0xe9, 0x5b, 0x6b, 0xbb, 0xd7, + 0xb6, 0x23, 0x83, 0xdb, 0x16, 0x7a, 0xad, 0xde, 0xc0, 0x68, 0xa7, 0x30, 0xd3, 0x45, 0xaf, 0x43, + 0xfe, 0x6c, 0x38, 0xb6, 0x07, 0xf5, 0x0c, 0x33, 0xba, 0x1e, 0x67, 0xb4, 0x4f, 0x95, 0xda, 0x29, + 0xcc, 0xb5, 0xe9, 0xa7, 0x34, 0xfd, 0xcc, 0xa8, 0x67, 0xe7, 0x7f, 0xea, 0x40, 0x3f, 0x63, 0x9f, + 0xa2, 0xba, 0xa8, 0x01, 0xa0, 0xe9, 0x9a, 0xa3, 0xf4, 0x06, 0xaa, 0xa6, 0xd7, 0x73, 0xcc, 0xf2, + 0xb9, 0x78, 0x4b, 0xcd, 0x69, 0x52, 0xc5, 0x76, 0x0a, 0x17, 0x35, 0xb7, 0x41, 0xbb, 0xfb, 0xe9, + 0x98, 0x58, 0x97, 0xf5, 0xfc, 0xfc, 0xee, 0xfe, 0x84, 0x2a, 0xd1, 0xee, 0x32, 0x6d, 0xd4, 0x82, + 0xb5, 0x2e, 0x39, 0xd7, 0x74, 0xa5, 0x3b, 0x34, 0x7a, 0x8f, 0xeb, 0x2b, 0xcc, 0x58, 0x8e, 0x33, + 0x6e, 0x50, 0xd5, 0x06, 0xd5, 0x6c, 0xa7, 0x30, 0x74, 0xbd, 0x16, 0xfa, 0x1f, 0x28, 0xf4, 0x06, + 0xa4, 0xf7, 0x58, 0x71, 0x2e, 0xea, 0xab, 0xcc, 0xc7, 0x56, 0x9c, 0x8f, 0x26, 0xd5, 0xeb, 0x5c, + 0xb4, 0x53, 0x78, 0xb5, 0xc7, 0x1f, 0xe9, 0xf8, 0xfb, 0x64, 0xa8, 0x4d, 0x88, 0x45, 0xed, 0x0b, + 0xf3, 0xc7, 0x7f, 0x8f, 0x6b, 0x32, 0x0f, 0xc5, 0xbe, 0xdb, 0x40, 0x6f, 0x43, 0x91, 0xe8, 0x7d, + 0x31, 0x8c, 0x22, 0x73, 0x71, 0x23, 0x76, 0x9d, 0xf5, 0xbe, 0x3b, 0x88, 0x02, 0x11, 0xcf, 0xe8, + 0x2e, 0xac, 0xf4, 0x8c, 0xd1, 0x48, 0x73, 0xea, 0xc0, 0xac, 0x37, 0x63, 0x07, 0xc0, 0xb4, 0xda, + 0x29, 0x2c, 0xf4, 0xd1, 0x11, 0x54, 0x86, 0x9a, 0xed, 0x28, 0xb6, 0xae, 0x9a, 0xf6, 0xc0, 0x70, + 0xec, 0xfa, 0x1a, 
0xf3, 0xf0, 0x7c, 0x9c, 0x87, 0x43, 0xcd, 0x76, 0x4e, 0x5d, 0xe5, 0x76, 0x0a, + 0x97, 0x87, 0x41, 0x01, 0xf5, 0x67, 0x9c, 0x9d, 0x11, 0xcb, 0x73, 0x58, 0x2f, 0xcd, 0xf7, 0x77, + 0x4c, 0xb5, 0x5d, 0x7b, 0xea, 0xcf, 0x08, 0x0a, 0xd0, 0xcf, 0x60, 0x63, 0x68, 0xa8, 0x7d, 0xcf, + 0x9d, 0xd2, 0x1b, 0x8c, 0xf5, 0xc7, 0xf5, 0x32, 0x73, 0xfa, 0x52, 0x6c, 0x27, 0x0d, 0xb5, 0xef, + 0xba, 0x68, 0x52, 0x83, 0x76, 0x0a, 0xaf, 0x0f, 0xa3, 0x42, 0xf4, 0x08, 0xae, 0xa8, 0xa6, 0x39, + 0xbc, 0x8c, 0x7a, 0xaf, 0x30, 0xef, 0xb7, 0xe3, 0xbc, 0xef, 0x51, 0x9b, 0xa8, 0x7b, 0xa4, 0x4e, + 0x49, 0x51, 0x07, 0x6a, 0xa6, 0x45, 0x4c, 0xd5, 0x22, 0x8a, 0x69, 0x19, 0xa6, 0x61, 0xab, 0xc3, + 0x7a, 0x95, 0xf9, 0x7e, 0x31, 0xce, 0xf7, 0x09, 0xd7, 0x3f, 0x11, 0xea, 0xed, 0x14, 0xae, 0x9a, + 0x61, 0x11, 0xf7, 0x6a, 0xf4, 0x88, 0x6d, 0xfb, 0x5e, 0x6b, 0x8b, 0xbc, 0x32, 0xfd, 0xb0, 0xd7, + 0x90, 0x88, 0x1e, 0x26, 0x72, 0x41, 0xcd, 0x95, 0x89, 0xe1, 0x90, 0xfa, 0xfa, 0xfc, 0xc3, 0xd4, + 0x62, 0xaa, 0x0f, 0x0d, 0x87, 0xd0, 0xc3, 0x44, 0xbc, 0x16, 0x52, 0xe1, 0xa9, 0x09, 0xb1, 0xb4, + 0xb3, 0x4b, 0xe6, 0x46, 0x61, 0x6f, 0x6c, 0xcd, 0xd0, 0xeb, 0x88, 0x39, 0x7c, 0x39, 0xce, 0xe1, + 0x43, 0x66, 0x44, 0x5d, 0xb4, 0x5c, 0x93, 0x76, 0x0a, 0x6f, 0x4c, 0xa6, 0xc5, 0x8d, 0x55, 0xc8, + 0x4f, 0xd4, 0xe1, 0x98, 0xc8, 0x2f, 0xc2, 0x5a, 0x20, 0xf8, 0xa1, 0x3a, 0xac, 0x8e, 0x88, 0x6d, + 0xab, 0xe7, 0x84, 0xc5, 0xca, 0x22, 0x76, 0x9b, 0x72, 0x05, 0x4a, 0xc1, 0x80, 0x27, 0x7f, 0x91, + 0xf6, 0x2c, 0x69, 0x2c, 0xa3, 0x96, 0x13, 0x62, 0xb1, 0x6e, 0x0a, 0x4b, 0xd1, 0x44, 0x37, 0xa1, + 0xcc, 0x4e, 0xa5, 0xe2, 0xbe, 0xa7, 0x01, 0x35, 0x87, 0x4b, 0x4c, 0xf8, 0x50, 0x28, 0x6d, 0xc1, + 0x9a, 0xb9, 0x6b, 0x7a, 0x2a, 0x59, 0xa6, 0x02, 0xe6, 0xae, 0xe9, 0x2a, 0x3c, 0x07, 0x25, 0x3a, + 0x56, 0x4f, 0x23, 0xc7, 0x3e, 0xb2, 0x46, 0x65, 0x42, 0x45, 0xfe, 0x63, 0x06, 0x6a, 0xd1, 0x20, + 0x89, 0xee, 0x42, 0x8e, 0xe6, 0x0b, 0x11, 0xfa, 0xa5, 0x6d, 0x9e, 0x4c, 0xb6, 0xdd, 0x64, 0xb2, + 0xdd, 0x71, 0x93, 0x49, 0xa3, 0xf0, 0xf5, 0xb7, 0x5b, 
0xa9, 0x2f, 0xfe, 0xb2, 0x95, 0xc6, 0xcc, + 0x02, 0x3d, 0x43, 0x63, 0x9a, 0xaa, 0xe9, 0x8a, 0xd6, 0x67, 0x5d, 0x2e, 0xd2, 0x80, 0xa5, 0x6a, + 0xfa, 0x41, 0x1f, 0x1d, 0x42, 0xad, 0x67, 0xe8, 0x36, 0xd1, 0xed, 0xb1, 0xad, 0xf0, 0x64, 0x25, + 0x02, 0x7e, 0x28, 0x6c, 0xf1, 0x14, 0xd8, 0x74, 0x35, 0x4f, 0x98, 0x22, 0xae, 0xf6, 0xc2, 0x02, + 0xb4, 0x0f, 0x30, 0x51, 0x87, 0x5a, 0x5f, 0x75, 0x0c, 0xcb, 0xae, 0xe7, 0x6e, 0x64, 0x67, 0xc6, + 0xae, 0x87, 0xae, 0xca, 0x03, 0xb3, 0xaf, 0x3a, 0xa4, 0x91, 0xa3, 0xdd, 0xc5, 0x01, 0x4b, 0xf4, + 0x02, 0x54, 0x55, 0xd3, 0x54, 0x6c, 0x47, 0x75, 0x88, 0xd2, 0xbd, 0x74, 0x88, 0xcd, 0x92, 0x41, + 0x09, 0x97, 0x55, 0xd3, 0x3c, 0xa5, 0xd2, 0x06, 0x15, 0xa2, 0xe7, 0xa1, 0x42, 0xf3, 0x86, 0xa6, + 0x0e, 0x95, 0x01, 0xd1, 0xce, 0x07, 0x0e, 0x0b, 0xfb, 0x59, 0x5c, 0x16, 0xd2, 0x36, 0x13, 0xca, + 0x7d, 0x6f, 0xc5, 0x59, 0xce, 0x40, 0x08, 0x72, 0x7d, 0xd5, 0x51, 0xd9, 0x4c, 0x96, 0x30, 0x7b, + 0xa6, 0x32, 0x53, 0x75, 0x06, 0x62, 0x7e, 0xd8, 0x33, 0xba, 0x0a, 0x2b, 0xc2, 0x6d, 0x96, 0xb9, + 0x15, 0x2d, 0x74, 0x05, 0xf2, 0xa6, 0x65, 0x4c, 0x08, 0x5b, 0xba, 0x02, 0xe6, 0x0d, 0xf9, 0x17, + 0x19, 0x58, 0x9f, 0xca, 0x2e, 0xd4, 0xef, 0x40, 0xb5, 0x07, 0xee, 0xb7, 0xe8, 0x33, 0x7a, 0x83, + 0xfa, 0x55, 0xfb, 0xc4, 0x12, 0x19, 0xb9, 0x3e, 0x3d, 0xd5, 0x6d, 0xf6, 0x5e, 0x4c, 0x8d, 0xd0, + 0x46, 0xc7, 0x50, 0x1b, 0xaa, 0xb6, 0xa3, 0xf0, 0x68, 0xad, 0x04, 0xb2, 0xf3, 0x74, 0x8e, 0x3a, + 0x54, 0xdd, 0xf8, 0x4e, 0x37, 0xb5, 0x70, 0x54, 0x19, 0x86, 0xa4, 0x08, 0xc3, 0x95, 0xee, 0xe5, + 0x13, 0x55, 0x77, 0x34, 0x9d, 0x28, 0x53, 0x2b, 0xf7, 0xcc, 0x94, 0xd3, 0xd6, 0x44, 0xeb, 0x13, + 0xbd, 0xe7, 0x2e, 0xd9, 0x86, 0x67, 0xec, 0x2d, 0xa9, 0x2d, 0x63, 0xa8, 0x84, 0xf3, 0x23, 0xaa, + 0x40, 0xc6, 0xb9, 0x10, 0x13, 0x90, 0x71, 0x2e, 0xd0, 0x7f, 0x41, 0x8e, 0x0e, 0x92, 0x0d, 0xbe, + 0x32, 0x03, 0x58, 0x08, 0xbb, 0xce, 0xa5, 0x49, 0x30, 0xd3, 0x94, 0x65, 0xef, 0x38, 0x78, 0x39, + 0x33, 0xea, 0x55, 0x7e, 0x09, 0xaa, 0x91, 0xa4, 0x18, 0x58, 0xbf, 0x74, 0x70, 0xfd, 0xe4, 
0x2a, + 0x94, 0x43, 0x19, 0x50, 0xbe, 0x0a, 0x57, 0x66, 0x25, 0x34, 0x79, 0xe0, 0xc9, 0x43, 0x89, 0x09, + 0xbd, 0x0e, 0x05, 0x2f, 0xa3, 0xf1, 0xe3, 0x38, 0x3d, 0x57, 0xae, 0x32, 0xf6, 0x54, 0xe9, 0x39, + 0xa4, 0xdb, 0x9a, 0xed, 0x87, 0x0c, 0xeb, 0xf8, 0xaa, 0x6a, 0x9a, 0x6d, 0xd5, 0x1e, 0xc8, 0x1f, + 0x43, 0x3d, 0x2e, 0x5b, 0x45, 0x86, 0x91, 0xf3, 0xb6, 0xe1, 0x55, 0x58, 0x39, 0x33, 0xac, 0x91, + 0xea, 0x30, 0x67, 0x65, 0x2c, 0x5a, 0x74, 0x7b, 0xf2, 0xcc, 0x95, 0x65, 0x62, 0xde, 0x90, 0x15, + 0x78, 0x26, 0x36, 0x63, 0x51, 0x13, 0x4d, 0xef, 0x13, 0x3e, 0x9f, 0x65, 0xcc, 0x1b, 0xbe, 0x23, + 0xde, 0x59, 0xde, 0xa0, 0x9f, 0xb5, 0xd9, 0x58, 0x99, 0xff, 0x22, 0x16, 0x2d, 0x59, 0x81, 0xab, + 0xb3, 0xd3, 0x16, 0xba, 0x0e, 0xc0, 0xe3, 0xa6, 0x38, 0x75, 0xd9, 0x5b, 0x25, 0x5c, 0x64, 0x92, + 0x7b, 0xf4, 0xe8, 0xbd, 0x00, 0x55, 0xff, 0xb5, 0x62, 0x6b, 0x4f, 0xf8, 0xd6, 0xc8, 0xe2, 0xb2, + 0xa7, 0x73, 0xaa, 0x3d, 0x21, 0xf2, 0xdb, 0xde, 0xf9, 0xf2, 0x13, 0x0e, 0xba, 0x0d, 0x39, 0x96, + 0xa2, 0xf8, 0x32, 0x5c, 0x9d, 0x3e, 0x49, 0x54, 0x0b, 0x33, 0x1d, 0xb9, 0x0d, 0x52, 0x7c, 0x82, + 0x59, 0xca, 0x53, 0x37, 0x30, 0xd6, 0x70, 0xe6, 0xf4, 0xcf, 0x76, 0x7a, 0xa9, 0xb3, 0x5d, 0x83, + 0xac, 0x73, 0x61, 0xd7, 0x33, 0x6c, 0x72, 0xe8, 0xa3, 0xfc, 0x0f, 0x80, 0x02, 0x26, 0xb6, 0x49, + 0x63, 0x2c, 0x6a, 0x40, 0x91, 0x5c, 0xf4, 0x88, 0xe9, 0xb8, 0x69, 0x69, 0x76, 0x3a, 0xe6, 0xda, + 0x2d, 0x57, 0x93, 0x02, 0x4b, 0xcf, 0x0c, 0xdd, 0x11, 0xdc, 0x21, 0x9e, 0x06, 0x08, 0xf3, 0x20, + 0x79, 0x78, 0xc3, 0x25, 0x0f, 0xd9, 0x58, 0x2c, 0xc9, 0xad, 0x22, 0xec, 0xe1, 0x8e, 0x60, 0x0f, + 0xb9, 0x05, 0x1f, 0x0b, 0xd1, 0x87, 0x66, 0x88, 0x3e, 0xe4, 0x17, 0x0c, 0x33, 0x86, 0x3f, 0xbc, + 0xe1, 0xf2, 0x87, 0x95, 0x05, 0x3d, 0x8e, 0x10, 0x88, 0xfd, 0x30, 0x81, 0xe0, 0xe0, 0xff, 0x66, + 0xac, 0x75, 0x2c, 0x83, 0xf8, 0xdf, 0x00, 0x83, 0x28, 0xc4, 0xc2, 0x77, 0xee, 0x64, 0x06, 0x85, + 0x68, 0x86, 0x28, 0x44, 0x71, 0xc1, 0x1c, 0xc4, 0x70, 0x88, 0x77, 0x82, 0x1c, 0x02, 0x62, 0x69, + 0x88, 0x58, 0xef, 0x59, 0x24, 
0xe2, 0x4d, 0x8f, 0x44, 0xac, 0xc5, 0xb2, 0x20, 0x31, 0x86, 0x28, + 0x8b, 0x38, 0x9e, 0x62, 0x11, 0x1c, 0xf5, 0xbf, 0x10, 0xeb, 0x62, 0x01, 0x8d, 0x38, 0x9e, 0xa2, + 0x11, 0xe5, 0x05, 0x0e, 0x17, 0xf0, 0x88, 0x0f, 0x67, 0xf3, 0x88, 0x78, 0xa4, 0x2f, 0xba, 0x99, + 0x8c, 0x48, 0x28, 0x31, 0x44, 0xa2, 0x1a, 0x0b, 0x7a, 0xb9, 0xfb, 0xc4, 0x4c, 0xe2, 0xc1, 0x0c, + 0x26, 0xc1, 0x31, 0xff, 0xad, 0x58, 0xe7, 0x09, 0xa8, 0xc4, 0x83, 0x19, 0x54, 0x62, 0x7d, 0xa1, + 0xdb, 0x85, 0x5c, 0x62, 0x3f, 0xcc, 0x25, 0xd0, 0x82, 0x73, 0x15, 0x4b, 0x26, 0xba, 0x71, 0x64, + 0x62, 0x83, 0x79, 0x7c, 0x25, 0xd6, 0xe3, 0x0f, 0x61, 0x13, 0x2f, 0xd1, 0x5c, 0x13, 0x89, 0xa6, + 0x34, 0x1f, 0x12, 0xcb, 0x32, 0x2c, 0xc1, 0x0b, 0x78, 0x43, 0xbe, 0x45, 0xd1, 0xa5, 0x1f, 0x39, + 0xe7, 0x30, 0x0f, 0x86, 0x3b, 0x02, 0xd1, 0x52, 0xfe, 0x7d, 0xda, 0xb7, 0x65, 0x80, 0x2c, 0x88, + 0x4c, 0x8b, 0x02, 0x99, 0x06, 0xf8, 0x48, 0x26, 0xcc, 0x47, 0xb6, 0x60, 0x8d, 0xe2, 0x89, 0x08, + 0xd5, 0x50, 0x4d, 0x8f, 0x6a, 0xdc, 0x86, 0x75, 0x06, 0x18, 0x79, 0x7a, 0x15, 0x20, 0x22, 0xc7, + 0x72, 0x6b, 0x95, 0xbe, 0xe0, 0xc7, 0x9e, 0xa3, 0x89, 0x57, 0x61, 0x23, 0xa0, 0xeb, 0xe1, 0x14, + 0x8e, 0xbb, 0x6b, 0x9e, 0xf6, 0x9e, 0x00, 0x2c, 0x5f, 0xa5, 0xfd, 0x19, 0xf2, 0x39, 0xca, 0x2c, + 0x3a, 0x91, 0xfe, 0x37, 0xd1, 0x89, 0xcc, 0x0f, 0xa6, 0x13, 0x41, 0xdc, 0x95, 0x0d, 0xe3, 0xae, + 0xbf, 0xa7, 0xfd, 0x35, 0xf1, 0xc8, 0x41, 0xcf, 0xe8, 0x13, 0x81, 0x84, 0xd8, 0x33, 0x4d, 0xce, + 0x43, 0xe3, 0x5c, 0xe0, 0x1d, 0xfa, 0x48, 0xb5, 0xbc, 0xf4, 0x56, 0x14, 0xd9, 0xcb, 0x03, 0x51, + 0x79, 0x36, 0xc3, 0x02, 0x44, 0xd5, 0x20, 0xfb, 0x98, 0xf0, 0x64, 0x54, 0xc2, 0xf4, 0x91, 0xea, + 0xb1, 0x4d, 0xc6, 0x52, 0x4c, 0x09, 0xf3, 0x06, 0xba, 0x0b, 0x45, 0x56, 0x86, 0x54, 0x0c, 0xd3, + 0x16, 0x79, 0xe3, 0xd9, 0xe0, 0x58, 0x79, 0xb5, 0x71, 0xfb, 0x84, 0xea, 0x1c, 0x9b, 0x36, 0x2e, + 0x98, 0xe2, 0x29, 0x80, 0x0f, 0x8b, 0x21, 0x9a, 0x72, 0x0d, 0x8a, 0xb4, 0xf7, 0xb6, 0xa9, 0xf6, + 0x08, 0x4b, 0x02, 0x45, 0xec, 0x0b, 0xe4, 0x47, 0x80, 0xa6, 0x53, 
0x19, 0x6a, 0xc3, 0x0a, 0x99, + 0x10, 0xdd, 0xb1, 0x19, 0x4c, 0x8b, 0xc0, 0x20, 0xc1, 0x01, 0x88, 0xee, 0x34, 0xea, 0x74, 0x92, + 0xff, 0xf6, 0xed, 0x56, 0x8d, 0x6b, 0xbf, 0x62, 0x8c, 0x34, 0x87, 0x8c, 0x4c, 0xe7, 0x12, 0x0b, + 0x7b, 0xf9, 0xcf, 0x19, 0x0a, 0xc8, 0x43, 0x69, 0x6e, 0xe6, 0xdc, 0xba, 0x5b, 0x3e, 0x13, 0x20, + 0x63, 0xc9, 0xe6, 0x7b, 0x13, 0xe0, 0x5c, 0xb5, 0x95, 0xcf, 0x54, 0xdd, 0x21, 0x7d, 0x31, 0xe9, + 0x01, 0x09, 0x92, 0xa0, 0x40, 0x5b, 0x63, 0x9b, 0xf4, 0x05, 0x2f, 0xf4, 0xda, 0x81, 0x71, 0xae, + 0xfe, 0xb8, 0x71, 0x86, 0x67, 0xb9, 0x10, 0x99, 0xe5, 0x00, 0x58, 0x2e, 0x06, 0xc1, 0x32, 0xed, + 0x9b, 0x69, 0x69, 0x86, 0xa5, 0x39, 0x97, 0x6c, 0x69, 0xb2, 0xd8, 0x6b, 0xa3, 0x9b, 0x50, 0x1e, + 0x91, 0x91, 0x69, 0x18, 0x43, 0x85, 0x87, 0x9b, 0x35, 0x66, 0x5a, 0x12, 0xc2, 0x16, 0x8b, 0x3a, + 0xbf, 0xcc, 0xf8, 0xe7, 0xcf, 0x27, 0x45, 0xff, 0x71, 0x13, 0x2c, 0xff, 0x8a, 0x95, 0x4a, 0xc2, + 0x40, 0x06, 0x9d, 0xc2, 0xba, 0x77, 0xfc, 0x95, 0x31, 0x0b, 0x0b, 0xee, 0x86, 0x4e, 0x1a, 0x3f, + 0x6a, 0x93, 0xb0, 0xd8, 0x46, 0x1f, 0xc0, 0xd3, 0x91, 0xd8, 0xe6, 0xb9, 0xce, 0x24, 0x0d, 0x71, + 0x4f, 0x85, 0x43, 0x9c, 0xeb, 0xda, 0x9f, 0xac, 0xec, 0x8f, 0x3c, 0x75, 0x07, 0x94, 0x7d, 0x07, + 0x71, 0xd9, 0xcc, 0xe5, 0xbf, 0x09, 0x65, 0x8b, 0x38, 0xaa, 0xa6, 0x2b, 0xa1, 0xfa, 0x46, 0x89, + 0x0b, 0x45, 0xd5, 0xe4, 0x04, 0x9e, 0x9a, 0x89, 0xcf, 0xd0, 0x7f, 0x43, 0xd1, 0x87, 0x76, 0xe9, + 0x98, 0x52, 0x81, 0x47, 0x7f, 0x7d, 0x5d, 0xf9, 0x0f, 0x69, 0xdf, 0x65, 0x98, 0x50, 0xb7, 0x60, + 0xc5, 0x22, 0xf6, 0x78, 0xc8, 0x29, 0x6e, 0x65, 0xf7, 0xd5, 0x64, 0xc8, 0x8e, 0x4a, 0xc7, 0x43, + 0x07, 0x0b, 0x63, 0xf9, 0x11, 0xac, 0x70, 0x09, 0x5a, 0x83, 0xd5, 0x07, 0x47, 0xf7, 0x8f, 0x8e, + 0xdf, 0x3f, 0xaa, 0xa5, 0x10, 0xc0, 0xca, 0x5e, 0xb3, 0xd9, 0x3a, 0xe9, 0xd4, 0xd2, 0xa8, 0x08, + 0xf9, 0xbd, 0xc6, 0x31, 0xee, 0xd4, 0x32, 0x54, 0x8c, 0x5b, 0xef, 0xb5, 0x9a, 0x9d, 0x5a, 0x16, + 0xad, 0x43, 0x99, 0x3f, 0x2b, 0xfb, 0xc7, 0xf8, 0xff, 0xf7, 0x3a, 0xb5, 0x5c, 0x40, 0x74, 0xda, + 0x3a, 
0xba, 0xd7, 0xc2, 0xb5, 0xbc, 0xfc, 0x1a, 0xe5, 0xd0, 0x31, 0x58, 0xd0, 0x67, 0xcb, 0xe9, + 0x00, 0x5b, 0x96, 0x7f, 0x9d, 0xa1, 0xa4, 0x33, 0x0e, 0xe0, 0xa1, 0xf7, 0x22, 0x03, 0xdf, 0x5d, + 0x02, 0x1d, 0x46, 0x46, 0x8f, 0x9e, 0x87, 0x8a, 0x45, 0xce, 0x88, 0xd3, 0x1b, 0x70, 0xc0, 0xc9, + 0x53, 0x66, 0x19, 0x97, 0x85, 0x94, 0x19, 0xd9, 0x5c, 0xed, 0x13, 0xd2, 0x73, 0x14, 0x1e, 0x8b, + 0xf8, 0xa6, 0x2b, 0x52, 0x35, 0x2a, 0x3d, 0xe5, 0x42, 0xf9, 0xe3, 0xa5, 0xe6, 0xb2, 0x08, 0x79, + 0xdc, 0xea, 0xe0, 0x0f, 0x6a, 0x59, 0x84, 0xa0, 0xc2, 0x1e, 0x95, 0xd3, 0xa3, 0xbd, 0x93, 0xd3, + 0xf6, 0x31, 0x9d, 0xcb, 0x0d, 0xa8, 0xba, 0x73, 0xe9, 0x0a, 0xf3, 0xf2, 0x5d, 0x78, 0x3a, 0x06, + 0x9d, 0x2e, 0xa8, 0x18, 0xc8, 0x1f, 0xfa, 0xb9, 0x2b, 0x50, 0x0a, 0xd8, 0x87, 0x4a, 0x04, 0x19, + 0xa6, 0xa7, 0xa9, 0x8b, 0x4f, 0xe5, 0x3d, 0xd4, 0x87, 0xcb, 0x93, 0x60, 0x53, 0xfe, 0x6d, 0x1a, + 0x9e, 0x9d, 0x83, 0x1d, 0xd1, 0xfd, 0xc8, 0x9a, 0xdd, 0x59, 0x06, 0x79, 0x46, 0xb7, 0xec, 0xdd, + 0x44, 0xd3, 0x7c, 0x7a, 0xb8, 0x77, 0xda, 0x0e, 0x6f, 0x59, 0xf9, 0x77, 0xe9, 0xe0, 0xfc, 0x85, + 0x31, 0xf7, 0xbb, 0x91, 0x2e, 0xee, 0x24, 0x05, 0xf0, 0xd1, 0x3d, 0x25, 0x41, 0x81, 0x88, 0xa2, + 0x9f, 0xa8, 0x4d, 0x78, 0x6d, 0xf9, 0xd5, 0xc5, 0x5d, 0xf7, 0xfb, 0x9b, 0x91, 0x3f, 0x82, 0x4a, + 0xb8, 0x28, 0x49, 0x4f, 0x8c, 0x65, 0x8c, 0xf5, 0x3e, 0xeb, 0x64, 0x1e, 0xf3, 0x06, 0x7a, 0x1d, + 0xf2, 0x74, 0x3d, 0x5c, 0xc0, 0x37, 0x1d, 0x5a, 0xe8, 0x7c, 0x06, 0x8a, 0x9a, 0x5c, 0x5b, 0x7e, + 0x02, 0x79, 0x16, 0x24, 0x69, 0xc0, 0x63, 0xe5, 0x45, 0x81, 0xa1, 0xe9, 0x33, 0xfa, 0x08, 0x40, + 0x75, 0x1c, 0x4b, 0xeb, 0x8e, 0x7d, 0xc7, 0x5b, 0xb3, 0x83, 0xec, 0x9e, 0xab, 0xd7, 0xb8, 0x26, + 0xa2, 0xed, 0x15, 0xdf, 0x34, 0x10, 0x71, 0x03, 0x0e, 0xe5, 0x23, 0xa8, 0x84, 0x6d, 0x5d, 0xd4, + 0xc7, 0xfb, 0x10, 0x46, 0x7d, 0x1c, 0xc4, 0x0b, 0xd4, 0xe7, 0x61, 0xc6, 0x2c, 0x2f, 0x25, 0xb3, + 0x86, 0xfc, 0x79, 0x1a, 0x0a, 0x9d, 0x0b, 0x31, 0xb9, 0x31, 0x55, 0x4c, 0xdf, 0x34, 0x13, 0xac, + 0xd9, 0xf1, 0xb2, 0x68, 0xd6, 0x2b, 0xb6, 
0xbe, 0xe3, 0xed, 0x84, 0x5c, 0xd2, 0x52, 0x82, 0x5b, + 0x99, 0x12, 0x3b, 0xf4, 0x2d, 0x28, 0x7a, 0x29, 0x92, 0x92, 0x11, 0xb5, 0xdf, 0xb7, 0x88, 0x6d, + 0x8b, 0x30, 0xe7, 0x36, 0x59, 0x51, 0xdc, 0xf8, 0x4c, 0x54, 0x05, 0xb3, 0x98, 0x37, 0xe4, 0x3e, + 0x54, 0x23, 0xf9, 0x15, 0xbd, 0x05, 0xab, 0xe6, 0xb8, 0xab, 0xb8, 0xd3, 0x13, 0xf9, 0xb5, 0xec, + 0xc2, 0xdc, 0x71, 0x77, 0xa8, 0xf5, 0xee, 0x93, 0x4b, 0xb7, 0x33, 0xe6, 0xb8, 0x7b, 0x9f, 0xcf, + 0x22, 0xff, 0x4a, 0x26, 0xf8, 0x95, 0x09, 0x14, 0xdc, 0x4d, 0x81, 0xfe, 0x0f, 0x8a, 0x5e, 0xea, + 0xf6, 0xfe, 0x95, 0xc4, 0xe6, 0x7c, 0xe1, 0xde, 0x37, 0xa1, 0x9c, 0xc9, 0xd6, 0xce, 0x75, 0xd2, + 0x57, 0x7c, 0x3a, 0xc4, 0xbe, 0x56, 0xc0, 0x55, 0xfe, 0xe2, 0xd0, 0xe5, 0x42, 0xf2, 0x3f, 0xd3, + 0x50, 0x70, 0x6b, 0xe2, 0xe8, 0xb5, 0xc0, 0xbe, 0xab, 0xcc, 0xa8, 0x78, 0xb9, 0x8a, 0x7e, 0x5d, + 0x3b, 0xdc, 0xd7, 0xcc, 0xf2, 0x7d, 0x8d, 0xfb, 0x41, 0xe1, 0xfe, 0x2a, 0xca, 0x2d, 0xfd, 0xab, + 0xe8, 0x15, 0x40, 0x8e, 0xe1, 0xa8, 0x43, 0xca, 0xb1, 0x35, 0xfd, 0x5c, 0xe1, 0x93, 0xcd, 0xa1, + 0x5f, 0x8d, 0xbd, 0x79, 0xc8, 0x5e, 0x9c, 0xb0, 0x79, 0xff, 0x79, 0x1a, 0x0a, 0x5e, 0x0e, 0x5f, + 0xb6, 0x4c, 0x7d, 0x15, 0x56, 0x44, 0x9a, 0xe2, 0x75, 0x6a, 0xd1, 0xf2, 0xfe, 0x98, 0xe4, 0x02, + 0x7f, 0x4c, 0x24, 0x28, 0x8c, 0x88, 0xa3, 0xb2, 0x6c, 0xc0, 0x19, 0xa9, 0xd7, 0xbe, 0xfd, 0x26, + 0xac, 0x05, 0xfe, 0x18, 0xd0, 0x93, 0x77, 0xd4, 0x7a, 0xbf, 0x96, 0x92, 0x56, 0x3f, 0xff, 0xf2, + 0x46, 0xf6, 0x88, 0x7c, 0x46, 0xf7, 0x2c, 0x6e, 0x35, 0xdb, 0xad, 0xe6, 0xfd, 0x5a, 0x5a, 0x5a, + 0xfb, 0xfc, 0xcb, 0x1b, 0xab, 0x98, 0xb0, 0x6a, 0xdb, 0xed, 0x36, 0x94, 0x82, 0xab, 0x12, 0x8e, + 0x63, 0x08, 0x2a, 0xf7, 0x1e, 0x9c, 0x1c, 0x1e, 0x34, 0xf7, 0x3a, 0x2d, 0xe5, 0xe1, 0x71, 0xa7, + 0x55, 0x4b, 0xa3, 0xa7, 0x61, 0xe3, 0xf0, 0xe0, 0xdd, 0x76, 0x47, 0x69, 0x1e, 0x1e, 0xb4, 0x8e, + 0x3a, 0xca, 0x5e, 0xa7, 0xb3, 0xd7, 0xbc, 0x5f, 0xcb, 0xec, 0x7e, 0x55, 0x82, 0xea, 0x5e, 0xa3, + 0x79, 0x40, 0xb3, 0xb4, 0xd6, 0x53, 0x59, 0xb9, 0xa0, 0x09, 0x39, 0x56, 0x10, 
0x98, 0x7b, 0x4b, + 0x43, 0x9a, 0x5f, 0x87, 0x45, 0xfb, 0x90, 0x67, 0xb5, 0x02, 0x34, 0xff, 0xda, 0x86, 0xb4, 0xa0, + 0x30, 0x4b, 0x3b, 0xc3, 0x8e, 0xc7, 0xdc, 0x7b, 0x1c, 0xd2, 0xfc, 0x3a, 0x2d, 0xc2, 0x50, 0xf4, + 0xb9, 0xc6, 0xe2, 0x7b, 0x0d, 0x52, 0x82, 0x60, 0x83, 0x0e, 0x61, 0xd5, 0xa5, 0x87, 0x8b, 0x6e, + 0x5a, 0x48, 0x0b, 0x0b, 0xa9, 0x74, 0xba, 0x38, 0x8d, 0x9f, 0x7f, 0x6d, 0x44, 0x5a, 0x50, 0x15, + 0x46, 0x07, 0xb0, 0x22, 0xf0, 0xf3, 0x82, 0xdb, 0x13, 0xd2, 0xa2, 0xc2, 0x28, 0x9d, 0x34, 0xbf, + 0x40, 0xb2, 0xf8, 0x32, 0x8c, 0x94, 0xa0, 0xe0, 0x8d, 0x1e, 0x00, 0x04, 0x48, 0x7b, 0x82, 0x5b, + 0x2e, 0x52, 0x92, 0x42, 0x36, 0x3a, 0x86, 0x82, 0xc7, 0xa1, 0x16, 0xde, 0x39, 0x91, 0x16, 0x57, + 0x94, 0xd1, 0x23, 0x28, 0x87, 0xb9, 0x43, 0xb2, 0x9b, 0x24, 0x52, 0xc2, 0x52, 0x31, 0xf5, 0x1f, + 0x26, 0x12, 0xc9, 0x6e, 0x96, 0x48, 0x09, 0x2b, 0xc7, 0xe8, 0x13, 0x58, 0x9f, 0x06, 0xfa, 0xc9, + 0x2f, 0x9a, 0x48, 0x4b, 0xd4, 0x92, 0xd1, 0x08, 0xd0, 0x0c, 0x82, 0xb0, 0xc4, 0xbd, 0x13, 0x69, + 0x99, 0xd2, 0x32, 0xea, 0x43, 0x35, 0x8a, 0xba, 0x93, 0xde, 0x43, 0x91, 0x12, 0x97, 0x99, 0xf9, + 0x57, 0xc2, 0xd8, 0x34, 0xe9, 0xbd, 0x14, 0x29, 0x71, 0xd5, 0x99, 0x1e, 0x87, 0x00, 0x0f, 0x48, + 0x70, 0x4f, 0x45, 0x4a, 0x52, 0x7f, 0x46, 0x26, 0x6c, 0xcc, 0xc2, 0xff, 0xcb, 0x5c, 0x5b, 0x91, + 0x96, 0x2a, 0x4b, 0x37, 0x5a, 0x5f, 0x7f, 0xb7, 0x99, 0xfe, 0xe6, 0xbb, 0xcd, 0xf4, 0x5f, 0xbf, + 0xdb, 0x4c, 0x7f, 0xf1, 0xfd, 0x66, 0xea, 0x9b, 0xef, 0x37, 0x53, 0x7f, 0xfa, 0x7e, 0x33, 0xf5, + 0xd3, 0x97, 0xcf, 0x35, 0x67, 0x30, 0xee, 0x6e, 0xf7, 0x8c, 0xd1, 0x4e, 0xf0, 0x96, 0xe1, 0xac, + 0x9b, 0x8f, 0xdd, 0x15, 0x96, 0xe9, 0xef, 0xfc, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xe2, 0x4c, + 0x31, 0x19, 0x29, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -3287,7 +3738,10 @@ type ABCIApplicationClient interface { OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) - PreprocessTxs(ctx context.Context, in *RequestPreprocessTxs, opts ...grpc.CallOption) (*ResponsePreprocessTxs, error) + PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) + ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) + ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) + VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) } type aBCIApplicationClient struct { @@ -3424,9 +3878,36 @@ func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *Requ return out, nil } -func (c *aBCIApplicationClient) PreprocessTxs(ctx context.Context, in *RequestPreprocessTxs, opts ...grpc.CallOption) (*ResponsePreprocessTxs, error) { - out := new(ResponsePreprocessTxs) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/PreprocessTxs", in, out, opts...) +func (c *aBCIApplicationClient) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) { + out := new(ResponsePrepareProposal) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/PrepareProposal", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) { + out := new(ResponseProcessProposal) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ProcessProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) { + out := new(ResponseExtendVote) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ExtendVote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) { + out := new(ResponseVerifyVoteExtension) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/VerifyVoteExtension", in, out, opts...) 
if err != nil { return nil, err } @@ -3449,7 +3930,10 @@ type ABCIApplicationServer interface { OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) - PreprocessTxs(context.Context, *RequestPreprocessTxs) (*ResponsePreprocessTxs, error) + PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) + ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) + ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) + VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) } // UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. @@ -3498,8 +3982,17 @@ func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") } -func (*UnimplementedABCIApplicationServer) PreprocessTxs(ctx context.Context, req *RequestPreprocessTxs) (*ResponsePreprocessTxs, error) { - return nil, status.Errorf(codes.Unimplemented, "method PreprocessTxs not implemented") +func (*UnimplementedABCIApplicationServer) PrepareProposal(ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") +} +func (*UnimplementedABCIApplicationServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") 
+} +func (*UnimplementedABCIApplicationServer) ExtendVote(ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExtendVote not implemented") +} +func (*UnimplementedABCIApplicationServer) VerifyVoteExtension(ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") } func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { @@ -3758,20 +4251,74 @@ func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Co return interceptor(ctx, in, info, handler) } -func _ABCIApplication_PreprocessTxs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPreprocessTxs) +func _ABCIApplication_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPrepareProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/PrepareProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, req.(*RequestPrepareProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestProcessProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ProcessProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, req.(*RequestProcessProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestExtendVote) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ExtendVote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ExtendVote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ExtendVote(ctx, req.(*RequestExtendVote)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestVerifyVoteExtension) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ABCIApplicationServer).PreprocessTxs(ctx, in) + return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/PreprocessTxs", + FullMethod: "/tendermint.abci.ABCIApplication/VerifyVoteExtension", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).PreprocessTxs(ctx, req.(*RequestPreprocessTxs)) + return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, req.(*RequestVerifyVoteExtension)) } return interceptor(ctx, in, info, handler) } @@ -3837,8 +4384,20 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ Handler: 
_ABCIApplication_ApplySnapshotChunk_Handler, }, { - MethodName: "PreprocessTxs", - Handler: _ABCIApplication_PreprocessTxs_Handler, + MethodName: "PrepareProposal", + Handler: _ABCIApplication_PrepareProposal_Handler, + }, + { + MethodName: "ProcessProposal", + Handler: _ABCIApplication_ProcessProposal_Handler, + }, + { + MethodName: "ExtendVote", + Handler: _ABCIApplication_ExtendVote_Handler, + }, + { + MethodName: "VerifyVoteExtension", + Handler: _ABCIApplication_VerifyVoteExtension_Handler, }, }, Streams: []grpc.StreamDesc{}, @@ -4171,16 +4730,16 @@ func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } return len(dAtA) - i, nil } -func (m *Request_PreprocessTxs) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_PreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.PreprocessTxs != nil { + if m.PrepareProposal != nil { { - size, err := m.PreprocessTxs.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4192,41 +4751,110 @@ func (m *Request_PreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *RequestEcho) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := 
len(dAtA) - _ = i - var l int - _ = l - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } return len(dAtA) - i, nil } - -func (m *RequestFlush) Marshal() (dAtA []byte, err error) { +func (m *Request_ExtendVote) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendVote != nil { + { + size, err := m.ExtendVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Request_VerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VerifyVoteExtension != nil { + { + size, err := m.VerifyVoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *RequestEcho) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestFlush) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { return nil, err } return dAtA[:n], nil @@ -4357,12 +4985,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err17 != nil { - return 0, err17 + n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err20 != nil { + return 0, err20 } - i -= n17 - i = encodeVarintTypes(dAtA, i, uint64(n17)) + i -= n20 + i = encodeVarintTypes(dAtA, i, uint64(n20)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -4745,7 +5373,114 @@ func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *RequestPreprocessTxs) Marshal() (dAtA []byte, err error) { +func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockDataSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockDataSize)) + i-- + dAtA[i] = 
0x10 + } + if len(m.BlockData) > 0 { + for iNdEx := len(m.BlockData) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlockData[iNdEx]) + copy(dAtA[i:], m.BlockData[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockData[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RequestExtendVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestVerifyVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4755,12 +5490,12 @@ func (m *RequestPreprocessTxs) Marshal() 
(dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestPreprocessTxs) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestPreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4771,9 +5506,19 @@ func (m *RequestPreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.Txs[iNdEx]) i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + } + } + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -5124,16 +5869,16 @@ func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, er } return len(dAtA) - i, nil } -func (m *Response_PreprocessTxs) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_PreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.PreprocessTxs != nil { + if m.PrepareProposal != nil { { - size, err := m.PreprocessTxs.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5147,6 +5892,75 @@ func (m *Response_PreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) } return len(dAtA) - i, nil } +func (m *Response_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
+ i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Response_ExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendVote != nil { + { + size, err := m.ExtendVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Response_VerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VerifyVoteExtension != nil { + { + size, err := m.VerifyVoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} func (m *ResponseException) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5864,20 +6678,20 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err } } if len(m.RefetchChunks) > 0 { - dAtA41 := make([]byte, len(m.RefetchChunks)*10) - var j40 int + dAtA50 := make([]byte, len(m.RefetchChunks)*10) + var j49 int for _, num := range m.RefetchChunks { for num >= 1<<7 { - dAtA41[j40] = uint8(uint64(num)&0x7f | 0x80) + dAtA50[j49] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j40++ + j49++ } - dAtA41[j40] = uint8(num) - j40++ + dAtA50[j49] = uint8(num) + j49++ } - i -= j40 - copy(dAtA[i:], 
dAtA41[:j40]) - i = encodeVarintTypes(dAtA, i, uint64(j40)) + i -= j49 + copy(dAtA[i:], dAtA50[:j49]) + i = encodeVarintTypes(dAtA, i, uint64(j49)) i-- dAtA[i] = 0x12 } @@ -5889,7 +6703,7 @@ func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ResponsePreprocessTxs) Marshal() (dAtA []byte, err error) { +func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5899,33 +6713,21 @@ func (m *ResponsePreprocessTxs) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponsePreprocessTxs) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponsePreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Messages != nil { - { - size, err := m.Messages.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Txs) > 0 { - for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Txs[iNdEx]) - copy(dAtA[i:], m.Txs[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + if len(m.BlockData) > 0 { + for iNdEx := len(m.BlockData) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlockData[iNdEx]) + copy(dAtA[i:], m.BlockData[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockData[iNdEx]))) i-- dAtA[i] = 0xa } @@ -5933,7 +6735,7 @@ func (m *ResponsePreprocessTxs) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { +func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5943,29 +6745,129 @@ func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseExtendVote) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Votes) > 0 { - for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.VoteExtension != nil { + { + size, err := m.VoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseProcessProposal) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Evidence) > 0 { + for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Evidence[iNdEx]) + copy(dAtA[i:], m.Evidence[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Evidence[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } if m.Round != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Round)) @@ -6257,12 +7159,12 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n46, err46 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err46 != nil { - return 0, err46 + n55, err55 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err55 != nil { + return 0, err55 } - i -= n46 - i = encodeVarintTypes(dAtA, i, uint64(n46)) + i -= n55 + i = encodeVarintTypes(dAtA, i, 
uint64(n55)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -6531,18 +7433,54 @@ func (m *Request_ApplySnapshotChunk) Size() (n int) { } return n } -func (m *Request_PreprocessTxs) Size() (n int) { +func (m *Request_PrepareProposal) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.PreprocessTxs != nil { - l = m.PreprocessTxs.Size() + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() n += 1 + l + sovTypes(uint64(l)) } return n } +func (m *Request_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendVote != nil { + l = m.ExtendVote.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_VerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VerifyVoteExtension != nil { + l = m.VerifyVoteExtension.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *RequestEcho) Size() (n int) { if m == nil { return 0 @@ -6780,12 +7718,58 @@ func (m *RequestApplySnapshotChunk) Size() (n int) { return n } -func (m *RequestPreprocessTxs) Size() (n int) { +func (m *RequestPrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BlockData) > 0 { + for _, b := range m.BlockData { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.BlockDataSize != 0 { + n += 1 + sovTypes(uint64(m.BlockDataSize)) + } + return n +} + +func (m *RequestExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestVerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + 
return n +} + +func (m *RequestProcessProposal) Size() (n int) { if m == nil { return 0 } var l int _ = l + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) if len(m.Txs) > 0 { for _, b := range m.Txs { l = len(b) @@ -6987,14 +7971,50 @@ func (m *Response_ApplySnapshotChunk) Size() (n int) { } return n } -func (m *Response_PreprocessTxs) Size() (n int) { +func (m *Response_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ExtendVote) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.PreprocessTxs != nil { - l = m.PreprocessTxs.Size() + if m.ExtendVote != nil { + l = m.ExtendVote.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_VerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VerifyVoteExtension != nil { + l = m.VerifyVoteExtension.Size() n += 2 + l + sovTypes(uint64(l)) } return n @@ -7338,25 +8358,64 @@ func (m *ResponseApplySnapshotChunk) Size() (n int) { return n } -func (m *ResponsePreprocessTxs) Size() (n int) { +func (m *ResponsePrepareProposal) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Txs) > 0 { - for _, b := range m.Txs { + if len(m.BlockData) > 0 { + for _, b := range m.BlockData { l = len(b) n += 1 + l + sovTypes(uint64(l)) } } - if m.Messages != nil { - l = m.Messages.Size() + return n +} + +func (m *ResponseExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteExtension != nil { + l = m.VoteExtension.Size() n += 1 + l + sovTypes(uint64(l)) } return n } +func (m *ResponseVerifyVoteExtension) Size() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + return n +} + +func (m *ResponseProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + if len(m.Evidence) > 0 { + for _, b := range m.Evidence { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *LastCommitInfo) Size() (n int) { if m == nil { return 0 @@ -8054,7 +9113,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreprocessTxs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8081,29 +9140,134 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestPreprocessTxs{} + v := &RequestPrepareProposal{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_PreprocessTxs{v} + m.Value = &Request_PrepareProposal{v} iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { + v := &RequestProcessProposal{} + if 
err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ProcessProposal{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestExtendVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ExtendVote{v} + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestVerifyVoteExtension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_VerifyVoteExtension{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { return io.ErrUnexpectedEOF } return nil @@ -9687,7 +10851,280 @@ func (m *RequestApplySnapshotChunk) 
Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestPreprocessTxs) Unmarshal(dAtA []byte) error { +func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockData = append(m.BlockData, make([]byte, postIndex-iNdEx)) + copy(m.BlockData[len(m.BlockData)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockDataSize", wireType) + } + m.BlockDataSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockDataSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestExtendVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &types1.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestVerifyVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &types1.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9710,13 +11147,46 @@ func (m *RequestPreprocessTxs) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType 
:= int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestPreprocessTxs: wiretype end group for non-group") + return fmt.Errorf("proto: RequestProcessProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPreprocessTxs: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) } @@ -10080,7 +11550,112 @@ func (m *Response) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseDeliverTx{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_DeliverTx{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_EndBlock{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Commit{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10107,15 +11682,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = 
&Response_DeliverTx{v} + m.Value = &Response_ListSnapshots{v} iNdEx = postIndex - case 10: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10142,15 +11717,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseOfferSnapshot{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_OfferSnapshot{v} iNdEx = postIndex - case 11: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10177,15 +11752,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCommit{} + v := &ResponseLoadSnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Commit{v} + m.Value = &Response_LoadSnapshotChunk{v} iNdEx = postIndex - case 12: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10212,15 +11787,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseListSnapshots{} + v := &ResponseApplySnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_ListSnapshots{v} + m.Value = &Response_ApplySnapshotChunk{v} iNdEx = postIndex - case 13: + case 16: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field OfferSnapshot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10247,15 +11822,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseOfferSnapshot{} + v := &ResponsePrepareProposal{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_OfferSnapshot{v} + m.Value = &Response_PrepareProposal{v} iNdEx = postIndex - case 14: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10282,15 +11857,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseLoadSnapshotChunk{} + v := &ResponseProcessProposal{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_LoadSnapshotChunk{v} + m.Value = &Response_ProcessProposal{v} iNdEx = postIndex - case 15: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10317,15 +11892,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseApplySnapshotChunk{} + v := &ResponseExtendVote{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_ApplySnapshotChunk{v} + m.Value = &Response_ExtendVote{v} iNdEx = postIndex - case 16: + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreprocessTxs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for 
field VerifyVoteExtension", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10352,11 +11927,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponsePreprocessTxs{} + v := &ResponseVerifyVoteExtension{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_PreprocessTxs{v} + m.Value = &Response_VerifyVoteExtension{v} iNdEx = postIndex default: iNdEx = preIndex @@ -12620,7 +14195,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponsePreprocessTxs) Unmarshal(dAtA []byte) error { +func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12643,15 +14218,15 @@ func (m *ResponsePreprocessTxs) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponsePreprocessTxs: wiretype end group for non-group") + return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePreprocessTxs: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockData", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12678,12 +14253,62 @@ func (m *ResponsePreprocessTxs) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) - copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + m.BlockData = append(m.BlockData, make([]byte, postIndex-iNdEx)) + copy(m.BlockData[len(m.BlockData)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + default: + 
iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseExtendVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12710,12 +14335,182 @@ func (m *ResponsePreprocessTxs) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Messages == nil { - m.Messages = &types1.Messages{} + if m.VoteExtension == nil { + m.VoteExtension = &types1.VoteExtension{} + } + if err := m.VoteExtension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseVerifyVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseVerifyVoteExtension_Result(b&0x7F) << shift + if b < 0x80 { + break + } } - if err := m.Messages.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ResponseProcessProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseProcessProposal_Result(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evidence = append(m.Evidence, make([]byte, postIndex-iNdEx)) + copy(m.Evidence[len(m.Evidence)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/abci/version/version.go b/abci/version/version.go deleted file mode 100644 index f4dc4d2358..0000000000 --- a/abci/version/version.go +++ /dev/null @@ -1,9 +0,0 @@ -package version - -import ( - "github.com/tendermint/tendermint/version" -) - -// TODO: eliminate this after some version refactor - -const Version = version.ABCIVersion diff --git a/buf.gen.yaml b/buf.gen.yaml deleted file mode 100644 index dc56781dd4..0000000000 --- a/buf.gen.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# The version of the generation template. -# Required. -# The only currently-valid value is v1beta1. -version: v1beta1 - -# The plugins to run. 
-plugins: - # The name of the plugin. - - name: gogofaster - # The the relative output directory. - out: proto - # Any options to provide to the plugin. - opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative diff --git a/buf.yaml b/buf.yaml deleted file mode 100644 index cc4aced576..0000000000 --- a/buf.yaml +++ /dev/null @@ -1,16 +0,0 @@ -version: v1beta1 - -build: - roots: - - proto - - third_party/proto -lint: - use: - - BASIC - - FILE_LOWER_SNAKE_CASE - - UNARY_RPC - ignore: - - gogoproto -breaking: - use: - - FILE diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index 203b3df0dd..e34236acc1 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -6,10 +6,11 @@ import ( "crypto/x509" "flag" "fmt" - "io/ioutil" "net" "net/http" "os" + "os/signal" + "syscall" "time" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" @@ -20,7 +21,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" grpcprivval "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -45,12 +45,19 @@ func main() { keyFile = flag.String("keyfile", "", "absolute path to server key") rootCA = flag.String("rootcafile", "", "absolute path to root CA") prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)") - - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false). 
- With("module", "priv_val") ) flag.Parse() + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct logger: %v", err) + os.Exit(1) + } + logger = logger.With("module", "priv_val") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger.Info( "Starting private validator", "addr", *addr, @@ -78,7 +85,7 @@ func main() { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(*rootCA) + bs, err := os.ReadFile(*rootCA) if err != nil { fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err) os.Exit(1) @@ -131,9 +138,10 @@ func main() { os.Exit(1) } - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - logger.Debug("SignerServer: calling Close") + opctx, opcancel := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM) + defer opcancel() + go func() { + <-opctx.Done() if *prometheusAddr != "" { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -143,7 +151,7 @@ func main() { } } s.GracefulStop() - }) + }() // Run forever. 
select {} diff --git a/cmd/tendermint/commands/debug/debug.go b/cmd/tendermint/commands/debug/debug.go index e07f7978de..2bf76babc5 100644 --- a/cmd/tendermint/commands/debug/debug.go +++ b/cmd/tendermint/commands/debug/debug.go @@ -15,7 +15,7 @@ var ( flagProfAddr = "pprof-laddr" flagFrequency = "frequency" - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) ) // DebugCmd defines the root command containing subcommands that assist in diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index cb1cc942a8..0fb5c0f1aa 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -1,9 +1,9 @@ package debug import ( + "context" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -43,7 +43,7 @@ func init() { ) } -func dumpCmdHandler(_ *cobra.Command, args []string) error { +func dumpCmdHandler(cmd *cobra.Command, args []string) error { outDir := args[0] if outDir == "" { return errors.New("invalid output directory") @@ -64,25 +64,27 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { return fmt.Errorf("failed to create new http client: %w", err) } + ctx := cmd.Context() + home := viper.GetString(cli.HomeFlag) conf := config.DefaultConfig() conf = conf.SetRoot(home) config.EnsureRoot(conf.RootDir) - dumpDebugData(outDir, conf, rpc) + dumpDebugData(ctx, outDir, conf, rpc) ticker := time.NewTicker(time.Duration(frequency) * time.Second) for range ticker.C { - dumpDebugData(outDir, conf, rpc) + dumpDebugData(ctx, outDir, conf, rpc) } return nil } -func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { +func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() - tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp") if err != nil 
{ logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err) return @@ -90,19 +92,19 @@ func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { defer os.RemoveAll(tmpDir) logger.Info("getting node status...") - if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil { + if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil { logger.Error("failed to dump node status", "error", err) return } logger.Info("getting node network info...") - if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil { + if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil { logger.Error("failed to dump node network info", "error", err) return } logger.Info("getting node consensus state...") - if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil { + if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil { logger.Error("failed to dump node consensus state", "error", err) return } diff --git a/cmd/tendermint/commands/debug/io.go b/cmd/tendermint/commands/debug/io.go index dcfff50c89..bf904cf5c6 100644 --- a/cmd/tendermint/commands/debug/io.go +++ b/cmd/tendermint/commands/debug/io.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -111,5 +110,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error { return fmt.Errorf("failed to encode state dump: %w", err) } - return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) } diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index 3e749e5131..18351db122 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -34,7 +33,8 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`, } func 
killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) + ctx := cmd.Context() + pid, err := strconv.ParseInt(args[0], 10, 64) if err != nil { return err } @@ -56,24 +56,24 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // Create a temporary directory which will contain all the state dumps and // relevant files and directories that will be compressed into a file. - tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp") if err != nil { return fmt.Errorf("failed to create temporary directory: %w", err) } defer os.RemoveAll(tmpDir) logger.Info("getting node status...") - if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil { + if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil { return err } logger.Info("getting node network info...") - if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil { + if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil { return err } logger.Info("getting node consensus state...") - if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil { + if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil { return err } @@ -92,7 +92,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { } logger.Info("killing Tendermint process") - if err := killProc(pid, tmpDir); err != nil { + if err := killProc(int(pid), tmpDir); err != nil { return err } @@ -105,7 +105,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. 
-func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. @@ -128,7 +128,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the Tendermint process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. - p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill Tendermint process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index fa356c4880..24626207f5 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -3,7 +3,7 @@ package debug import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "os" "path" @@ -15,8 +15,8 @@ import ( // dumpStatus gets node status state dump from the Tendermint RPC and writes it // to file. It returns an error upon failure. -func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { - status, err := rpc.Status(context.Background()) +func dumpStatus(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error { + status, err := rpc.Status(ctx) if err != nil { return fmt.Errorf("failed to get node status: %w", err) } @@ -26,8 +26,8 @@ func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { // dumpNetInfo gets network information state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. 
-func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { - netInfo, err := rpc.NetInfo(context.Background()) +func dumpNetInfo(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error { + netInfo, err := rpc.NetInfo(ctx) if err != nil { return fmt.Errorf("failed to get node network information: %w", err) } @@ -37,8 +37,8 @@ func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { // dumpConsensusState gets consensus state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. -func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { - consDump, err := rpc.DumpConsensusState(context.Background()) +func dumpConsensusState(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error { + consDump, err := rpc.DumpConsensusState(ctx) if err != nil { return fmt.Errorf("failed to get node consensus dump: %w", err) } @@ -73,10 +73,10 @@ func dumpProfile(dir, addr, profile string, debug int) error { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) } diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index d8b493e3cd..1207c704b0 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -1,11 +1,11 @@ package commands import ( + "encoding/json" "fmt" "github.com/spf13/cobra" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{ func genNodeKey(cmd *cobra.Command, args []string) error { nodeKey := types.GenNodeKey() - bz, err := tmjson.Marshal(nodeKey) + bz, err := 
json.Marshal(nodeKey) if err != nil { return fmt.Errorf("nodeKey -> json: %w", err) } diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 830518ce98..ac37790ccd 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -1,41 +1,41 @@ package commands import ( + "encoding/json" "fmt" "github.com/spf13/cobra" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" ) // GenValidatorCmd allows the generation of a keypair for a // validator. -var GenValidatorCmd = &cobra.Command{ - Use: "gen-validator", - Short: "Generate new validator keypair", - RunE: genValidator, -} - -func init() { - GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, - "Key type to generate privval file with. Options: ed25519, secp256k1") -} - -func genValidator(cmd *cobra.Command, args []string) error { - pv, err := privval.GenFilePV("", "", keyType) - if err != nil { - return err - } - - jsbz, err := tmjson.Marshal(pv) - if err != nil { - return fmt.Errorf("validator -> json: %w", err) +func MakeGenValidatorCommand() *cobra.Command { + var keyType string + cmd := &cobra.Command{ + Use: "gen-validator", + Short: "Generate new validator keypair", + RunE: func(cmd *cobra.Command, args []string) error { + pv, err := privval.GenFilePV("", "", keyType) + if err != nil { + return err + } + + jsbz, err := json.Marshal(pv) + if err != nil { + return fmt.Errorf("validator -> json: %w", err) + } + + fmt.Printf("%v\n", string(jsbz)) + + return nil + }, } - fmt.Printf(`%v -`, string(jsbz)) + cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. 
Options: ed25519, secp256k1") - return nil + return cmd } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index bc94f763b8..33d511097c 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -7,7 +7,8 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" @@ -15,43 +16,41 @@ import ( "github.com/tendermint/tendermint/types" ) -// InitFilesCmd initializes a fresh Tendermint Core instance. -var InitFilesCmd = &cobra.Command{ - Use: "init [full|validator|seed]", - Short: "Initializes a Tendermint node", - ValidArgs: []string{"full", "validator", "seed"}, - // We allow for zero args so we can throw a more informative error - Args: cobra.MaximumNArgs(1), - RunE: initFiles, -} - -var ( - keyType string -) +// MakeInitFilesCommand returns the command to initialize a fresh Tendermint Core instance. +func MakeInitFilesCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var keyType string + cmd := &cobra.Command{ + Use: "init [full|validator|seed]", + Short: "Initializes a Tendermint node", + ValidArgs: []string{"full", "validator", "seed"}, + // We allow for zero args so we can throw a more informative error + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("must specify a node type: tendermint init [validator|full|seed]") + } + conf.Mode = args[0] + return initFilesWithConfig(cmd.Context(), conf, logger) + }, + } -func init() { - InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. 
Options: ed25519, secp256k1") -} -func initFiles(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("must specify a node type: tendermint init [validator|full|seed]") - } - config.Mode = args[0] - return initFilesWithConfig(config) + return cmd } -func initFilesWithConfig(config *cfg.Config) error { +func initFilesWithConfig(ctx context.Context, conf *config.Config, logger log.Logger) error { var ( - pv *privval.FilePV - err error + pv *privval.FilePV + err error + keyType string ) - if config.Mode == cfg.ModeValidator { + if conf.Mode == config.ModeValidator { // private validator - privValKeyFile := config.PrivValidator.KeyFile() - privValStateFile := config.PrivValidator.StateFile() + privValKeyFile := conf.PrivValidator.KeyFile() + privValStateFile := conf.PrivValidator.StateFile() if tmos.FileExists(privValKeyFile) { pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile) if err != nil { @@ -65,13 +64,15 @@ func initFilesWithConfig(config *cfg.Config) error { if err != nil { return err } - pv.Save() + if err := pv.Save(); err != nil { + return err + } logger.Info("Generated private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) } } - nodeKeyFile := config.NodeKeyFile() + nodeKeyFile := conf.NodeKeyFile() if tmos.FileExists(nodeKeyFile) { logger.Info("Found node key", "path", nodeKeyFile) } else { @@ -82,7 +83,7 @@ func initFilesWithConfig(config *cfg.Config) error { } // genesis file - genFile := config.GenesisFile() + genFile := conf.GenesisFile() if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) } else { @@ -98,7 +99,7 @@ func initFilesWithConfig(config *cfg.Config) error { } } - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) + ctx, cancel := context.WithTimeout(ctx, ctxTimeout) defer cancel() // if this is a validator we add it to genesis @@ -121,8 +122,10 @@ func initFilesWithConfig(config *cfg.Config) error { } // write config file - 
cfg.WriteConfigFile(config.RootDir, config) - logger.Info("Generated config", "mode", config.Mode) + if err := config.WriteConfigFile(conf.RootDir, conf); err != nil { + return err + } + logger.Info("Generated config", "mode", conf.Mode) return nil } diff --git a/cmd/tendermint/commands/inspect.go b/cmd/tendermint/commands/inspect.go index 3cd6ef572f..11afa419ea 100644 --- a/cmd/tendermint/commands/inspect.go +++ b/cmd/tendermint/commands/inspect.go @@ -1,21 +1,22 @@ package commands import ( - "context" - "os" "os/signal" "syscall" "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/libs/log" ) -// InspectCmd is the command for starting an inspect server. -var InspectCmd = &cobra.Command{ - Use: "inspect", - Short: "Run an inspect server for investigating Tendermint state", - Long: ` +// InspectCmd constructs the command to start an inspect server. +func MakeInspectCommand(conf *config.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect", + Short: "Run an inspect server for investigating Tendermint state", + Long: ` inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging issues with Tendermint. @@ -24,40 +25,27 @@ var InspectCmd = &cobra.Command{ The inspect command can be used to query the block and state store using Tendermint RPC calls to debug issues of inconsistent state. `, - - RunE: runInspect, -} - -func init() { - InspectCmd.Flags(). - String("rpc.laddr", - config.RPC.ListenAddress, "RPC listenener address. Port required") - InspectCmd.Flags(). - String("db-backend", - config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") - InspectCmd.Flags(). 
- String("db-dir", config.DBPath, "database directory") -} - -func runInspect(cmd *cobra.Command, args []string) error { - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() - - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) - go func() { - <-c - cancel() - }() - - ins, err := inspect.NewFromConfig(logger, config) - if err != nil { - return err + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM, syscall.SIGINT) + defer cancel() + + ins, err := inspect.NewFromConfig(logger, conf) + if err != nil { + return err + } + + logger.Info("starting inspect server") + if err := ins.Run(ctx); err != nil { + return err + } + return nil + }, } + cmd.Flags().String("rpc.laddr", + conf.RPC.ListenAddress, "RPC listenener address. Port required") + cmd.Flags().String("db-backend", + conf.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") + cmd.Flags().String("db-dir", conf.DBPath, "database directory") - logger.Info("starting inspect server") - if err := ins.Run(ctx); err != nil { - return err - } - return nil + return cmd } diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go index 739af4a7d1..1706f8b6a2 100644 --- a/cmd/tendermint/commands/key_migrate.go +++ b/cmd/tendermint/commands/key_migrate.go @@ -6,10 +6,11 @@ import ( "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/scripts/keymigrate" ) -func MakeKeyMigrateCommand() *cobra.Command { +func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "key-migrate", Short: "Run Database key migration", @@ -38,7 +39,7 @@ func MakeKeyMigrateCommand() *cobra.Command { db, err := cfg.DefaultDBProvider(&cfg.DBContext{ ID: dbctx, - Config: config, + Config: conf, }) if err != nil { @@ -58,7 +59,7 
@@ func MakeKeyMigrateCommand() *cobra.Command { } // allow database info to be overridden via cli - addDBFlags(cmd) + addDBFlags(cmd, conf) return cmd } diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index f4c9a21da8..0ee7748353 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -6,16 +6,18 @@ import ( "fmt" "net/http" "os" + "os/signal" "path/filepath" "strings" + "syscall" "time" "github.com/spf13/cobra" dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" - tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/light" lproxy "github.com/tendermint/tendermint/light/proxy" lrpc "github.com/tendermint/tendermint/light/rpc" @@ -23,11 +25,58 @@ import ( rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" ) -// LightCmd represents the base command when called without any subcommands -var LightCmd = &cobra.Command{ - Use: "light [chainID]", - Short: "Run a light client proxy server, verifying Tendermint rpc", - Long: `Run a light client proxy server, verifying Tendermint rpc. +// LightCmd constructs the base command called when invoked without any subcommands. 
+func MakeLightCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var ( + listenAddr string + primaryAddr string + witnessAddrsJoined string + chainID string + dir string + maxOpenConnections int + + sequential bool + trustingPeriod time.Duration + trustedHeight int64 + trustedHash []byte + trustLevelStr string + + logLevel string + logFormat string + + primaryKey = []byte("primary") + witnessesKey = []byte("witnesses") + ) + + checkForExistingProviders := func(db dbm.DB) (string, []string, error) { + primaryBytes, err := db.Get(primaryKey) + if err != nil { + return "", []string{""}, err + } + witnessesBytes, err := db.Get(witnessesKey) + if err != nil { + return "", []string{""}, err + } + witnessesAddrs := strings.Split(string(witnessesBytes), ",") + return string(primaryBytes), witnessesAddrs, nil + } + + saveProviders := func(db dbm.DB, primaryAddr, witnessesAddrs string) error { + err := db.Set(primaryKey, []byte(primaryAddr)) + if err != nil { + return fmt.Errorf("failed to save primary provider: %w", err) + } + err = db.Set(witnessesKey, []byte(witnessesAddrs)) + if err != nil { + return fmt.Errorf("failed to save witness providers: %w", err) + } + return nil + } + + cmd := &cobra.Command{ + Use: "light [chainID]", + Short: "Run a light client proxy server, verifying Tendermint rpc", + Long: `Run a light client proxy server, verifying Tendermint rpc. All calls that can be tracked back to a block header by a proof will be verified before passing them back to the caller. Other than @@ -45,182 +94,134 @@ When /abci_query is called, the Merkle key path format is: Please verify with your application that this Merkle key format is used (true for applications built w/ Cosmos SDK). 
`, - RunE: runProxy, - Args: cobra.ExactArgs(1), - Example: `light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657 + RunE: func(cmd *cobra.Command, args []string) error { + chainID = args[0] + logger.Info("Creating client...", "chainID", chainID) + + var witnessesAddrs []string + if witnessAddrsJoined != "" { + witnessesAddrs = strings.Split(witnessAddrsJoined, ",") + } + + lightDB, err := dbm.NewGoLevelDB("light-client-db", dir) + if err != nil { + return fmt.Errorf("can't create a db: %w", err) + } + // create a prefixed db on the chainID + db := dbm.NewPrefixDB(lightDB, []byte(chainID)) + + if primaryAddr == "" { // check to see if we can start from an existing state + var err error + primaryAddr, witnessesAddrs, err = checkForExistingProviders(db) + if err != nil { + return fmt.Errorf("failed to retrieve primary or witness from db: %w", err) + } + if primaryAddr == "" { + return errors.New("no primary address was provided nor found. Please provide a primary (using -p)." + + " Run the command: tendermint light --help for more information") + } + } else { + err := saveProviders(db, primaryAddr, witnessAddrsJoined) + if err != nil { + logger.Error("Unable to save primary and or witness addresses", "err", err) + } + } + + trustLevel, err := tmmath.ParseFraction(trustLevelStr) + if err != nil { + return fmt.Errorf("can't parse trust level: %w", err) + } + + options := []light.Option{light.Logger(logger)} + + vo := light.SkippingVerification(trustLevel) + if sequential { + vo = light.SequentialVerification() + } + options = append(options, vo) + + // Initiate the light client. If the trusted store already has blocks in it, this + // will be used else we use the trusted options. 
+ c, err := light.NewHTTPClient( + context.Background(), + chainID, + light.TrustOptions{ + Period: trustingPeriod, + Height: trustedHeight, + Hash: trustedHash, + }, + primaryAddr, + witnessesAddrs, + dbs.New(db), + options..., + ) + if err != nil { + return err + } + + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = conf.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = conf.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = maxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn())) + if err != nil { + return err + } + + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + + go func() { + <-ctx.Done() + p.Listener.Close() + }() + + logger.Info("Starting proxy...", "laddr", listenAddr) + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { + // Error starting or closing listener: + logger.Error("proxy ListenAndServe", "err", err) + } + + return nil + }, + Args: cobra.ExactArgs(1), + Example: `light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657 --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD`, -} - -var ( - listenAddr string - primaryAddr string - witnessAddrsJoined string - chainID string - dir string - maxOpenConnections int - - sequential bool - trustingPeriod time.Duration - trustedHeight int64 - trustedHash []byte - trustLevelStr string - - logLevel string - logFormat string - - primaryKey = []byte("primary") - witnessesKey = []byte("witnesses") -) + } -func init() { - LightCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", + 
cmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "serve the proxy on the given address") - LightCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", + cmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", "connect to a Tendermint node at this address") - LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", + cmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", "tendermint nodes to cross-check the primary node, comma-separated") - LightCmd.Flags().StringVarP(&dir, "dir", "d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), + cmd.Flags().StringVarP(&dir, "dir", "d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), "specify the directory") - LightCmd.Flags().IntVar( + cmd.Flags().IntVar( &maxOpenConnections, "max-open-connections", 900, "maximum number of simultaneous connections (including WebSocket).") - LightCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour, + cmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour, "trusting period that headers can be verified within. 
Should be significantly less than the unbonding period") - LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height") - LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash") - LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)") - LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)") - LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3", + cmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height") + cmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash") + cmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)") + cmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)") + cmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3", "trust level. Must be between 1/3 and 3/3", ) - LightCmd.Flags().BoolVar(&sequential, "sequential", false, + cmd.Flags().BoolVar(&sequential, "sequential", false, "sequential verification. 
Verify all headers sequentially as opposed to using skipping verification", ) -} - -func runProxy(cmd *cobra.Command, args []string) error { - logger, err := log.NewDefaultLogger(logFormat, logLevel, false) - if err != nil { - return err - } - chainID = args[0] - logger.Info("Creating client...", "chainID", chainID) + return cmd - witnessesAddrs := []string{} - if witnessAddrsJoined != "" { - witnessesAddrs = strings.Split(witnessAddrsJoined, ",") - } - - lightDB, err := dbm.NewGoLevelDB("light-client-db", dir) - if err != nil { - return fmt.Errorf("can't create a db: %w", err) - } - // create a prefixed db on the chainID - db := dbm.NewPrefixDB(lightDB, []byte(chainID)) - - if primaryAddr == "" { // check to see if we can start from an existing state - var err error - primaryAddr, witnessesAddrs, err = checkForExistingProviders(db) - if err != nil { - return fmt.Errorf("failed to retrieve primary or witness from db: %w", err) - } - if primaryAddr == "" { - return errors.New("no primary address was provided nor found. Please provide a primary (using -p)." + - " Run the command: tendermint light --help for more information") - } - } else { - err := saveProviders(db, primaryAddr, witnessAddrsJoined) - if err != nil { - logger.Error("Unable to save primary and or witness addresses", "err", err) - } - } - - trustLevel, err := tmmath.ParseFraction(trustLevelStr) - if err != nil { - return fmt.Errorf("can't parse trust level: %w", err) - } - - options := []light.Option{light.Logger(logger)} - - if sequential { - options = append(options, light.SequentialVerification()) - } else { - options = append(options, light.SkippingVerification(trustLevel)) - } - - // Initiate the light client. If the trusted store already has blocks in it, this - // will be used else we use the trusted options. 
- c, err := light.NewHTTPClient( - context.Background(), - chainID, - light.TrustOptions{ - Period: trustingPeriod, - Height: trustedHeight, - Hash: trustedHash, - }, - primaryAddr, - witnessesAddrs, - dbs.New(db), - options..., - ) - if err != nil { - return err - } - - cfg := rpcserver.DefaultConfig() - cfg.MaxBodyBytes = config.RPC.MaxBodyBytes - cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes - cfg.MaxOpenConnections = maxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= config.RPC.TimeoutBroadcastTxCommit { - cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - - p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn())) - if err != nil { - return err - } - - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - p.Listener.Close() - }) - - logger.Info("Starting proxy...", "laddr", listenAddr) - if err := p.ListenAndServe(); err != http.ErrServerClosed { - // Error starting or closing listener: - logger.Error("proxy ListenAndServe", "err", err) - } - - return nil -} - -func checkForExistingProviders(db dbm.DB) (string, []string, error) { - primaryBytes, err := db.Get(primaryKey) - if err != nil { - return "", []string{""}, err - } - witnessesBytes, err := db.Get(witnessesKey) - if err != nil { - return "", []string{""}, err - } - witnessesAddrs := strings.Split(string(witnessesBytes), ",") - return string(primaryBytes), witnessesAddrs, nil -} - -func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error { - err := db.Set(primaryKey, []byte(primaryAddr)) - if err != nil { - return fmt.Errorf("failed to save primary provider: %w", err) - } - err = db.Set(witnessesKey, []byte(witnessesAddrs)) - if err != nil { - return fmt.Errorf("failed to save witness providers: %w", err) - } - return nil } 
diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go deleted file mode 100644 index 4c71e099a4..0000000000 --- a/cmd/tendermint/commands/probe_upnp.go +++ /dev/null @@ -1,32 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/internal/p2p/upnp" - tmjson "github.com/tendermint/tendermint/libs/json" -) - -// ProbeUpnpCmd adds capabilities to test the UPnP functionality. -var ProbeUpnpCmd = &cobra.Command{ - Use: "probe-upnp", - Short: "Test UPnP functionality", - RunE: probeUpnp, -} - -func probeUpnp(cmd *cobra.Command, args []string) error { - capabilities, err := upnp.Probe(logger) - if err != nil { - fmt.Println("Probe failed: ", err) - } else { - fmt.Println("Probe success!") - jsonBytes, err := tmjson.Marshal(capabilities) - if err != nil { - return err - } - fmt.Println(string(jsonBytes)) - } - return nil -} diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index 58f11657b2..00a1d14b20 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -3,6 +3,7 @@ package commands import ( "errors" "fmt" + "path/filepath" "strings" "github.com/spf13/cobra" @@ -16,6 +17,8 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -24,59 +27,68 @@ const ( reindexFailed = "event re-index failed: " ) -// ReIndexEventCmd allows re-index the event by given block height interval -var ReIndexEventCmd = &cobra.Command{ - Use: "reindex-event", - Short: "reindex events to the event store backends", - Long: ` +// MakeReindexEventCommand constructs a command to re-index events 
in a block height interval. +func MakeReindexEventCommand(conf *tmcfg.Config, logger log.Logger) *cobra.Command { + var ( + startHeight int64 + endHeight int64 + ) + + cmd := &cobra.Command{ + Use: "reindex-event", + Short: "reindex events to the event store backends", + Long: ` reindex-event is an offline tooling to re-index block and tx events to the eventsinks, -you can run this command when the event store backend dropped/disconnected or you want to -replace the backend. The default start-height is 0, meaning the tooling will start -reindex from the base block height(inclusive); and the default end-height is 0, meaning +you can run this command when the event store backend dropped/disconnected or you want to +replace the backend. The default start-height is 0, meaning the tooling will start +reindex from the base block height(inclusive); and the default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omit either or both arguments. 
`, - Example: ` + Example: ` tendermint reindex-event tendermint reindex-event --start-height 2 tendermint reindex-event --end-height 10 tendermint reindex-event --start-height 2 --end-height 10 `, - Run: func(cmd *cobra.Command, args []string) { - bs, ss, err := loadStateAndBlockStore(config) - if err != nil { - fmt.Println(reindexFailed, err) - return - } - - if err := checkValidHeight(bs); err != nil { - fmt.Println(reindexFailed, err) - return - } + RunE: func(cmd *cobra.Command, args []string) error { + bs, ss, err := loadStateAndBlockStore(conf) + if err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } - es, err := loadEventSinks(config) - if err != nil { - fmt.Println(reindexFailed, err) - return - } + cvhArgs := checkValidHeightArgs{ + startHeight: startHeight, + endHeight: endHeight, + } + if err := checkValidHeight(bs, cvhArgs); err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } - if err = eventReIndex(cmd, es, bs, ss); err != nil { - fmt.Println(reindexFailed, err) - return - } + es, err := loadEventSinks(conf) + if err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } - fmt.Println("event re-index finished") - }, -} + riArgs := eventReIndexArgs{ + startHeight: startHeight, + endHeight: endHeight, + sinks: es, + blockStore: bs, + stateStore: ss, + } + if err := eventReIndex(cmd, riArgs); err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } -var ( - startHeight int64 - endHeight int64 -) + logger.Info("event re-index finished") + return nil + }, + } -func init() { - ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index") - ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index") + cmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index") + cmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for 
re-index") + return cmd } func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { @@ -107,7 +119,7 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { if conn == "" { return nil, errors.New("the psql connection settings cannot be empty") } - es, err := psql.NewEventSink(conn, chainID) + es, err := psql.NewEventSink(conn, cfg.ChainID()) if err != nil { return nil, err } @@ -132,6 +144,10 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) { dbType := dbm.BackendType(cfg.DBBackend) + if !os.FileExists(filepath.Join(cfg.DBDir(), "blockstore.db")) { + return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir()) + } + // Get BlockStore blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { @@ -139,6 +155,10 @@ func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, } blockStore := store.NewBlockStore(blockStoreDB) + if !os.FileExists(filepath.Join(cfg.DBDir(), "state.db")) { + return nil, nil, fmt.Errorf("no blockstore found in %v", cfg.DBDir()) + } + // Get StateStore stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { @@ -149,24 +169,31 @@ func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, return blockStore, stateStore, nil } -func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error { +type eventReIndexArgs struct { + startHeight int64 + endHeight int64 + sinks []indexer.EventSink + blockStore state.BlockStore + stateStore state.Store +} +func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { var bar progressbar.Bar - bar.NewOption(startHeight-1, endHeight) + bar.NewOption(args.startHeight-1, args.endHeight) fmt.Println("start re-indexing events:") defer bar.Finish() - for i := startHeight; i <= endHeight; i++ { + for i := args.startHeight; i <= 
args.endHeight; i++ { select { case <-cmd.Context().Done(): return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err()) default: - b := bs.LoadBlock(i) + b := args.blockStore.LoadBlock(i) if b == nil { return fmt.Errorf("not able to load block at height %d from the blockstore", i) } - r, err := ss.LoadABCIResponses(i) + r, err := args.stateStore.LoadABCIResponses(i) if err != nil { return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i) } @@ -182,11 +209,11 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor if e.NumTxs > 0 { batch = indexer.NewBatch(e.NumTxs) - for i, tx := range b.Data.Txs { + for i := range b.Data.Txs { tr := abcitypes.TxResult{ Height: b.Height, Index: uint32(i), - Tx: tx, + Tx: b.Data.Txs[i], Result: *(r.DeliverTxs[i]), } @@ -194,7 +221,7 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor } } - for _, sink := range es { + for _, sink := range args.sinks { if err := sink.IndexBlockEvents(e); err != nil { return fmt.Errorf("block event re-index at height %d failed: %w", i, err) } @@ -213,40 +240,45 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor return nil } -func checkValidHeight(bs state.BlockStore) error { +type checkValidHeightArgs struct { + startHeight int64 + endHeight int64 +} + +func checkValidHeight(bs state.BlockStore, args checkValidHeightArgs) error { base := bs.Base() - if startHeight == 0 { - startHeight = base + if args.startHeight == 0 { + args.startHeight = base fmt.Printf("set the start block height to the base height of the blockstore %d \n", base) } - if startHeight < base { + if args.startHeight < base { return fmt.Errorf("%s (requested start height: %d, base height: %d)", - coretypes.ErrHeightNotAvailable, startHeight, base) + coretypes.ErrHeightNotAvailable, args.startHeight, base) } height := bs.Height() - if startHeight > height { + if args.startHeight > height 
{ return fmt.Errorf( - "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height) + "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, args.startHeight, height) } - if endHeight == 0 || endHeight > height { - endHeight = height + if args.endHeight == 0 || args.endHeight > height { + args.endHeight = height fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height) } - if endHeight < base { + if args.endHeight < base { return fmt.Errorf( - "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base) + "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, args.endHeight, base) } - if endHeight < startHeight { + if args.endHeight < args.startHeight { return fmt.Errorf( "%s (requested the end height: %d is less than the start height: %d)", - coretypes.ErrInvalidRequest, startHeight, endHeight) + coretypes.ErrInvalidRequest, args.startHeight, args.endHeight) } return nil diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 452a6b2a84..91b1ba42a6 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -10,11 +10,15 @@ import ( "github.com/stretchr/testify/require" abcitypes "github.com/tendermint/tendermint/abci/types" - tmcfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/libs/log" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tm-db" + + _ "github.com/lib/pq" // for the psql sink ) const ( @@ -22,13 +26,15 @@ const ( base int64 = 2 ) -func setupReIndexEventCmd() *cobra.Command { +func 
setupReIndexEventCmd(ctx context.Context, conf *config.Config, logger log.Logger) *cobra.Command { + cmd := MakeReindexEventCommand(conf, logger) + reIndexEventCmd := &cobra.Command{ - Use: ReIndexEventCmd.Use, + Use: cmd.Use, Run: func(cmd *cobra.Command, args []string) {}, } - _ = reIndexEventCmd.ExecuteContext(context.Background()) + _ = reIndexEventCmd.ExecuteContext(ctx) return reIndexEventCmd } @@ -65,10 +71,7 @@ func TestReIndexEventCheckHeight(t *testing.T) { } for _, tc := range testCases { - startHeight = tc.startHeight - endHeight = tc.endHeight - - err := checkValidHeight(mockBlockStore) + err := checkValidHeight(mockBlockStore, checkValidHeightArgs{startHeight: tc.startHeight, endHeight: tc.endHeight}) if tc.validHeight { require.NoError(t, err) } else { @@ -94,7 +97,7 @@ func TestLoadEventSink(t *testing.T) { } for _, tc := range testCases { - cfg := tmcfg.TestConfig() + cfg := config.TestConfig() cfg.TxIndex.Indexer = tc.sinks cfg.TxIndex.PsqlConn = tc.connURL _, err := loadEventSinks(cfg) @@ -107,12 +110,29 @@ func TestLoadEventSink(t *testing.T) { } func TestLoadBlockStore(t *testing.T) { - bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig()) + testCfg, err := config.ResetTestRoot(t.Name()) + require.NoError(t, err) + testCfg.DBBackend = "goleveldb" + _, _, err = loadStateAndBlockStore(testCfg) + // we should return an error because the state store and block store + // don't yet exist + require.Error(t, err) + + dbType := dbm.BackendType(testCfg.DBBackend) + bsdb, err := dbm.NewDB("blockstore", dbType, testCfg.DBDir()) + require.NoError(t, err) + bsdb.Close() + + ssdb, err := dbm.NewDB("state", dbType, testCfg.DBDir()) + require.NoError(t, err) + ssdb.Close() + + bs, ss, err := loadStateAndBlockStore(testCfg) require.NoError(t, err) require.NotNil(t, bs) require.NotNil(t, ss) - } + func TestReIndexEvent(t *testing.T) { mockBlockStore := &mocks.BlockStore{} mockStateStore := &mocks.Store{} @@ -157,11 +177,22 @@ func TestReIndexEvent(t 
*testing.T) { {height, height, false}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + conf := config.DefaultConfig() + for _, tc := range testCases { - startHeight = tc.startHeight - endHeight = tc.endHeight + err := eventReIndex( + setupReIndexEventCmd(ctx, conf, logger), + eventReIndexArgs{ + sinks: []indexer.EventSink{mockEventSink}, + blockStore: mockBlockStore, + stateStore: mockStateStore, + startHeight: tc.startHeight, + endHeight: tc.endHeight, + }) - err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore) if tc.reIndexErr { require.Error(t, err) } else { diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index e922740424..15520c469f 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -2,24 +2,29 @@ package commands import ( "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/libs/log" ) -// ReplayCmd allows replaying of messages from the WAL. -var ReplayCmd = &cobra.Command{ - Use: "replay", - Short: "Replay messages from WAL", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, false) - }, +// MakeReplayCommand constructs a command to replay messages from the WAL into consensus. +func MakeReplayCommand(conf *config.Config, logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "replay", + Short: "Replay messages from WAL", + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, conf.BaseConfig, conf.Consensus, false) + }, + } } -// ReplayConsoleCmd allows replaying of messages from the WAL in a -// console. 
-var ReplayConsoleCmd = &cobra.Command{ - Use: "replay-console", - Short: "Replay messages from WAL in a console", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) - }, +// MakeReplayConsoleCommand constructs a command to replay WAL messages to stdout. +func MakeReplayConsoleCommand(conf *config.Config, logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "replay-console", + Short: "Replay messages from WAL in a console", + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, conf.BaseConfig, conf.Consensus, true) + }, + } } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 5f3e54700d..ce0798e45f 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -5,70 +5,79 @@ import ( "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" ) -// ResetAllCmd removes the database of this Tendermint core -// instance. -var ResetAllCmd = &cobra.Command{ - Use: "unsafe-reset-all", - Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", - RunE: resetAll, +// MakeResetAllCommand constructs a command that removes the database of +// the specified Tendermint core instance. 
+func MakeResetAllCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var keyType string + + cmd := &cobra.Command{ + Use: "unsafe-reset-all", + Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", + RunE: func(cmd *cobra.Command, args []string) error { + return resetAll(conf.DBDir(), conf.PrivValidator.KeyFile(), + conf.PrivValidator.StateFile(), logger, keyType) + }, + } + cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") + + return cmd } -var keepAddrBook bool +func MakeResetPrivateValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var keyType string -func init() { - ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") - ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + cmd := &cobra.Command{ + Use: "unsafe-reset-priv-validator", + Short: "(unsafe) Reset this node's validator to genesis state", + RunE: func(cmd *cobra.Command, args []string) error { + return resetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType) + }, + } + + cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. Options: ed25519, secp256k1") -} + return cmd -// ResetPrivValidatorCmd resets the private validator files. -var ResetPrivValidatorCmd = &cobra.Command{ - Use: "unsafe-reset-priv-validator", - Short: "(unsafe) Reset this node's validator to genesis state", - RunE: resetPrivValidator, } // XXX: this is totally unsafe. // it's only suitable for testnets. -func resetAll(cmd *cobra.Command, args []string) error { - return ResetAll(config.DBDir(), config.PrivValidator.KeyFile(), - config.PrivValidator.StateFile(), logger) -} // XXX: this is totally unsafe. // it's only suitable for testnets. 
-func resetPrivValidator(cmd *cobra.Command, args []string) error { - return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger) -} -// ResetAll removes address book files plus all data, and resets the privValdiator data. +// resetAll removes address book files plus all data, and resets the privValdiator data. // Exported so other CLI tools can use it. -func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger) error { +func resetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { if err := os.RemoveAll(dbDir); err == nil { logger.Info("Removed all blockchain history", "dir", dbDir) } else { - logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) + logger.Error("error removing all blockchain history", "dir", dbDir, "err", err) } // recreate the dbDir since the privVal state needs to live there if err := tmos.EnsureDir(dbDir, 0700); err != nil { logger.Error("unable to recreate dbDir", "err", err) } - return resetFilePV(privValKeyFile, privValStateFile, logger) + return resetFilePV(privValKeyFile, privValStateFile, logger, keyType) } -func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) error { +func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { if _, err := os.Stat(privValKeyFile); err == nil { pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile) if err != nil { return err } - pv.Reset() + if err := pv.Reset(); err != nil { + return err + } logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile, "stateFile", privValStateFile) } else { @@ -76,7 +85,9 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err if err != nil { return err } - pv.Save() + if err := pv.Save(); err != nil { + return err + } logger.Info("Generated private validator file", "keyFile", privValKeyFile, "stateFile", 
privValStateFile) } diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go index 5aff232beb..a604341783 100644 --- a/cmd/tendermint/commands/rollback.go +++ b/cmd/tendermint/commands/rollback.go @@ -5,14 +5,15 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/state" ) -var RollbackStateCmd = &cobra.Command{ - Use: "rollback", - Short: "rollback tendermint state by one height", - Long: ` +func MakeRollbackStateCommand(conf *config.Config) *cobra.Command { + return &cobra.Command{ + Use: "rollback", + Short: "rollback tendermint state by one height", + Long: ` A state rollback is performed to recover from an incorrect application state transition, when Tendermint has persisted an incorrect app hash and is thus unable to make progress. Rollback overwrites a state at height n with the state at height n - 1. @@ -20,26 +21,32 @@ The application should also roll back to height n - 1. No blocks are removed, so restarting Tendermint the transactions in block n will be re-executed against the application. `, - RunE: func(cmd *cobra.Command, args []string) error { - height, hash, err := RollbackState(config) - if err != nil { - return fmt.Errorf("failed to rollback state: %w", err) - } - - fmt.Printf("Rolled back state to height %d and hash %v", height, hash) - return nil - }, + RunE: func(cmd *cobra.Command, args []string) error { + height, hash, err := RollbackState(conf) + if err != nil { + return fmt.Errorf("failed to rollback state: %w", err) + } + + fmt.Printf("Rolled back state to height %d and hash %X", height, hash) + return nil + }, + } + } // RollbackState takes the state at the current height n and overwrites it with the state // at height n - 1. Note state here refers to tendermint state not application state. // Returns the latest state height and app hash alongside an error if there was one. 
-func RollbackState(config *cfg.Config) (int64, []byte, error) { +func RollbackState(config *config.Config) (int64, []byte, error) { // use the parsed config to load the block and state store blockStore, stateStore, err := loadStateAndBlockStore(config) if err != nil { return -1, nil, err } + defer func() { + _ = blockStore.Close() + _ = stateStore.Close() + }() // rollback the last state return state.Rollback(blockStore, stateStore) diff --git a/cmd/tendermint/commands/rollback_test.go b/cmd/tendermint/commands/rollback_test.go new file mode 100644 index 0000000000..167fbc1f38 --- /dev/null +++ b/cmd/tendermint/commands/rollback_test.go @@ -0,0 +1,74 @@ +package commands_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/cmd/tendermint/commands" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/rpc/client/local" + rpctest "github.com/tendermint/tendermint/rpc/test" + e2e "github.com/tendermint/tendermint/test/e2e/app" +) + +func TestRollbackIntegration(t *testing.T) { + var height int64 + dir := t.TempDir() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) + cfg.BaseConfig.DBBackend = "goleveldb" + app, err := e2e.NewApplication(e2e.DefaultConfig(dir)) + + t.Run("First run", func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + require.NoError(t, err) + node, _, err := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) + require.NoError(t, err) + + time.Sleep(3 * time.Second) + cancel() + node.Wait() + require.False(t, node.IsRunning()) + }) + + t.Run("Rollback", func(t *testing.T) { + require.NoError(t, app.Rollback()) + height, _, err = commands.RollbackState(cfg) + require.NoError(t, err) + + }) + + t.Run("Restart", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + node2, 
_, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout) + require.NoError(t, err2) + + logger := log.NewTestingLogger(t) + + client, err := local.New(logger, node2.(local.NodeService)) + require.NoError(t, err) + + ticker := time.NewTicker(200 * time.Millisecond) + for { + select { + case <-ctx.Done(): + t.Fatalf("failed to make progress after 20 seconds. Min height: %d", height) + case <-ticker.C: + status, err := client.Status(ctx) + require.NoError(t, err) + + if status.SyncInfo.LatestBlockHeight > height { + return + } + } + } + }) + +} diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 2289ae363e..082bd29033 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -2,65 +2,62 @@ package commands import ( "fmt" + "os" + "path/filepath" "time" "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/cli" "github.com/tendermint/tendermint/libs/log" ) -var ( - config = cfg.DefaultConfig() - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - ctxTimeout = 4 * time.Second -) - -func init() { - registerFlagsRootCmd(RootCmd) -} - -func registerFlagsRootCmd(cmd *cobra.Command) { - cmd.PersistentFlags().String("log-level", config.LogLevel, "log level") -} +const ctxTimeout = 4 * time.Second // ParseConfig retrieves the default environment configuration, // sets up the Tendermint root and ensures that the root exists -func ParseConfig() (*cfg.Config, error) { - conf := cfg.DefaultConfig() - err := viper.Unmarshal(conf) - if err != nil { +func ParseConfig(conf *config.Config) (*config.Config, error) { + if err := viper.Unmarshal(conf); err != nil { return nil, err } + conf.SetRoot(conf.RootDir) - cfg.EnsureRoot(conf.RootDir) + if err := conf.ValidateBasic(); err != nil { - return nil, fmt.Errorf("error in config file: %v", err) + return 
nil, fmt.Errorf("error in config file: %w", err) } return conf, nil } -// RootCmd is the root command for Tendermint core. -var RootCmd = &cobra.Command{ - Use: "tendermint", - Short: "BFT state machine replication for applications in any programming languages", - PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - if cmd.Name() == VersionCmd.Name() { - return nil - } - - config, err = ParseConfig() - if err != nil { - return err - } +// RootCommand constructs the root command-line entry point for Tendermint core. +func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "tendermint", + Short: "BFT state machine replication for applications in any programming languages", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Name() == VersionCmd.Name() { + return nil + } + + if err := cli.BindFlagsLoadViper(cmd, args); err != nil { + return err + } + + pconf, err := ParseConfig(conf) + if err != nil { + return err + } + *conf = *pconf + config.EnsureRoot(conf.RootDir) - logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false) - if err != nil { - return err - } - - logger = logger.With("module", "main") - return nil - }, + return nil + }, + } + cmd.PersistentFlags().StringP(cli.HomeFlag, "", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir)), "directory for config and data") + cmd.PersistentFlags().Bool(cli.TraceFlag, false, "print out full stack trace on errors") + cmd.PersistentFlags().String("log-level", conf.LogLevel, "log level") + cobra.OnInitialize(func() { cli.InitEnv("TM") }) + return cmd } diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index cd4bc9f5f7..2eade59b67 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -2,10 +2,8 @@ package commands import ( "fmt" - "io/ioutil" "os" "path/filepath" - "strconv" "testing" "github.com/spf13/cobra" @@ 
-15,46 +13,42 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" ) // clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("TMHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - panic(err) - } +func clearConfig(t *testing.T, dir string) *cfg.Config { + t.Helper() + require.NoError(t, os.Unsetenv("TMHOME")) + require.NoError(t, os.Unsetenv("TM_HOME")) + require.NoError(t, os.RemoveAll(dir)) - if err := os.RemoveAll(dir); err != nil { - panic(err) - } viper.Reset() - config = cfg.DefaultConfig() + conf := cfg.DefaultConfig() + conf.RootDir = dir + return conf } // prepare new rootCmd -func testRootCmd() *cobra.Command { - rootCmd := &cobra.Command{ - Use: RootCmd.Use, - PersistentPreRunE: RootCmd.PersistentPreRunE, - Run: func(cmd *cobra.Command, args []string) {}, - } - registerFlagsRootCmd(rootCmd) +func testRootCmd(conf *cfg.Config) *cobra.Command { + logger := log.NewNopLogger() + cmd := RootCommand(conf, logger) + cmd.RunE = func(cmd *cobra.Command, args []string) error { return nil } + var l string - rootCmd.PersistentFlags().String("log", l, "Log") - return rootCmd + cmd.PersistentFlags().String("log", l, "Log") + return cmd } -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(rootDir) +func testSetup(t *testing.T, conf *cfg.Config, args []string, env map[string]string) error { + t.Helper() - rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir) + cmd := testRootCmd(conf) + viper.Set(cli.HomeFlag, conf.RootDir) // run with the args and env - args = append([]string{rootCmd.Use}, args...) + args = append([]string{cmd.Use}, args...) 
return cli.RunWithArgs(cmd, args, env) } @@ -72,22 +66,25 @@ func TestRootHome(t *testing.T) { } for i, tc := range cases { - idxString := strconv.Itoa(i) + t.Run(fmt.Sprint(i), func(t *testing.T) { + conf := clearConfig(t, tc.root) - err := testSetup(defaultRoot, tc.args, tc.env) - require.Nil(t, err, idxString) + err := testSetup(t, conf, tc.args, tc.env) + require.NoError(t, err) - assert.Equal(t, tc.root, config.RootDir, idxString) - assert.Equal(t, tc.root, config.P2P.RootDir, idxString) - assert.Equal(t, tc.root, config.Consensus.RootDir, idxString) - assert.Equal(t, tc.root, config.Mempool.RootDir, idxString) + require.Equal(t, tc.root, conf.RootDir) + require.Equal(t, tc.root, conf.P2P.RootDir) + require.Equal(t, tc.root, conf.Consensus.RootDir) + require.Equal(t, tc.root, conf.Mempool.RootDir) + }) } } func TestRootFlagsEnv(t *testing.T) { - // defaults defaults := cfg.DefaultConfig() + defaultDir := t.TempDir() + defaultLogLvl := defaults.LogLevel cases := []struct { @@ -102,19 +99,20 @@ func TestRootFlagsEnv(t *testing.T) { {nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env } - defaultRoot := t.TempDir() for i, tc := range cases { - idxString := strconv.Itoa(i) + t.Run(fmt.Sprint(i), func(t *testing.T) { + conf := clearConfig(t, defaultDir) + + err := testSetup(t, conf, tc.args, tc.env) + require.NoError(t, err) - err := testSetup(defaultRoot, tc.args, tc.env) - require.Nil(t, err, idxString) + assert.Equal(t, tc.logLevel, conf.LogLevel) + }) - assert.Equal(t, tc.logLevel, config.LogLevel, idxString) } } func TestRootConfig(t *testing.T) { - // write non-default config nonDefaultLogLvl := "debug" cvals := map[string]string{ @@ -122,9 +120,8 @@ func TestRootConfig(t *testing.T) { } cases := []struct { - args []string - env map[string]string - + args []string + env map[string]string logLvl string }{ {nil, nil, nonDefaultLogLvl}, // should load config @@ -133,29 +130,30 @@ func TestRootConfig(t *testing.T) { } for i, tc := range cases 
{ - defaultRoot := t.TempDir() - idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - - // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") - err := tmos.EnsureDir(configFilePath, 0700) - require.Nil(t, err) - - // write the non-defaults to a different path - // TODO: support writing sub configs so we can test that too - err = WriteConfigVals(configFilePath, cvals) - require.Nil(t, err) - - rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot) - - // run with the args and env - tc.args = append([]string{rootCmd.Use}, tc.args...) - err = cli.RunWithArgs(cmd, tc.args, tc.env) - require.Nil(t, err, idxString) - - assert.Equal(t, tc.logLvl, config.LogLevel, idxString) + t.Run(fmt.Sprint(i), func(t *testing.T) { + defaultRoot := t.TempDir() + conf := clearConfig(t, defaultRoot) + conf.LogLevel = tc.logLvl + + // XXX: path must match cfg.defaultConfigPath + configFilePath := filepath.Join(defaultRoot, "config") + err := tmos.EnsureDir(configFilePath, 0700) + require.NoError(t, err) + + // write the non-defaults to a different path + // TODO: support writing sub configs so we can test that too + err = WriteConfigVals(configFilePath, cvals) + require.NoError(t, err) + + cmd := testRootCmd(conf) + + // run with the args and env + tc.args = append([]string{cmd.Use}, tc.args...) 
+ err = cli.RunWithArgs(cmd, tc.args, tc.env) + require.NoError(t, err) + + require.Equal(t, tc.logLvl, conf.LogLevel) + }) } } @@ -167,5 +165,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index a5fa72ed57..afd3ae8f1c 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -3,149 +3,128 @@ package commands import ( "bytes" "crypto/sha256" - "errors" - "flag" "fmt" "io" "os" + "os/signal" + "syscall" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" - tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/libs/log" ) var ( genesisHash []byte ) -// AddNodeFlags exposes some common configuration options on the command-line -// These are exposed for convenience of commands embedding a tendermint node -func AddNodeFlags(cmd *cobra.Command) { +// AddNodeFlags exposes some common configuration options from conf in the flag +// set for cmd. This is a convenience for commands embedding a Tendermint node. 
+func AddNodeFlags(cmd *cobra.Command, conf *cfg.Config) { // bind flags - cmd.Flags().String("moniker", config.Moniker, "node name") + cmd.Flags().String("moniker", conf.Moniker, "node name") // mode flags - cmd.Flags().String("mode", config.Mode, "node mode (full | validator | seed)") + cmd.Flags().String("mode", conf.Mode, "node mode (full | validator | seed)") // priv val flags cmd.Flags().String( "priv-validator-laddr", - config.PrivValidator.ListenAddr, + conf.PrivValidator.ListenAddr, "socket address to listen on for connections from external priv-validator process") // node flags - cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing") - - // TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle - // This check was added to give users an upgrade prompt to use the new flag for syncing. - // - // The pflag package does not have a native way to print a depcrecation warning - // and return an error. This logic was added to print a deprecation message to the user - // and then crash if the user attempts to use the old --fast-sync flag. 
- fs := flag.NewFlagSet("", flag.ExitOnError) - fs.Func("fast-sync", "deprecated", - func(string) error { - return errors.New("--fast-sync has been deprecated, please use --blocksync.enable") - }) - cmd.Flags().AddGoFlagSet(fs) - - cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck + cmd.Flags().BytesHexVar( &genesisHash, "genesis-hash", []byte{}, "optional SHA-256 hash of the genesis file") - cmd.Flags().Int64("consensus.double-sign-check-height", config.Consensus.DoubleSignCheckHeight, + cmd.Flags().Int64("consensus.double-sign-check-height", conf.Consensus.DoubleSignCheckHeight, "how many blocks to look back to check existence of the node's "+ "consensus votes before joining consensus") // abci flags cmd.Flags().String( "proxy-app", - config.ProxyApp, + conf.ProxyApp, "proxy app address, or one of: 'kvstore',"+ " 'persistent_kvstore', 'e2e' or 'noop' for local testing.") - cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)") + cmd.Flags().String("abci", conf.ABCI, "specify abci transport (socket | grpc)") // rpc flags - cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") - cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods") - cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") + cmd.Flags().String("rpc.laddr", conf.RPC.ListenAddress, "RPC listen address. Port required") + cmd.Flags().Bool("rpc.unsafe", conf.RPC.Unsafe, "enabled unsafe rpc methods") + cmd.Flags().String("rpc.pprof-laddr", conf.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") // p2p flags cmd.Flags().String( "p2p.laddr", - config.P2P.ListenAddress, + conf.P2P.ListenAddress, "node listen address. 
(0.0.0.0:0 means any interface, any port)") - cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") - cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") - cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") - cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") + cmd.Flags().String("p2p.seeds", conf.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck + cmd.Flags().String("p2p.persistent-peers", conf.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") + cmd.Flags().Bool("p2p.upnp", conf.P2P.UPNP, "enable/disable UPNP port forwarding") + cmd.Flags().Bool("p2p.pex", conf.P2P.PexReactor, "enable/disable Peer-Exchange") + cmd.Flags().String("p2p.private-peer-ids", conf.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") // consensus flags cmd.Flags().Bool( "consensus.create-empty-blocks", - config.Consensus.CreateEmptyBlocks, + conf.Consensus.CreateEmptyBlocks, "set this to false to only produce blocks when there are txs or when the AppHash changes") cmd.Flags().String( "consensus.create-empty-blocks-interval", - config.Consensus.CreateEmptyBlocksInterval.String(), + conf.Consensus.CreateEmptyBlocksInterval.String(), "the possible interval between empty blocks") - addDBFlags(cmd) + addDBFlags(cmd, conf) } -func addDBFlags(cmd *cobra.Command) { +func addDBFlags(cmd *cobra.Command, conf *cfg.Config) { cmd.Flags().String( "db-backend", - config.DBBackend, + conf.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") cmd.Flags().String( "db-dir", - config.DBPath, + conf.DBPath, "database directory") } // NewRunNodeCmd returns the command that allows the CLI to start a node. 
// It can be used with a custom PrivValidator and in-process ABCI application. -func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command { +func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger log.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "start", Aliases: []string{"node", "run"}, Short: "Run the tendermint node", RunE: func(cmd *cobra.Command, args []string) error { - if err := checkGenesisHash(config); err != nil { + if err := checkGenesisHash(conf); err != nil { return err } - n, err := nodeProvider(config, logger) + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() + + n, err := nodeProvider(ctx, conf, logger) if err != nil { return fmt.Errorf("failed to create node: %w", err) } - if err := n.Start(); err != nil { + if err := n.Start(ctx); err != nil { return fmt.Errorf("failed to start node: %w", err) } logger.Info("started node", "node", n.String()) - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - if n.IsRunning() { - if err := n.Stop(); err != nil { - logger.Error("unable to stop the node", "error", err) - } - } - }) - - // Run forever. - select {} + <-ctx.Done() + return nil }, } - AddNodeFlags(cmd) + AddNodeFlags(cmd, conf) return cmd } diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go index 488f4c3228..9183a7c5e2 100644 --- a/cmd/tendermint/commands/show_node_id.go +++ b/cmd/tendermint/commands/show_node_id.go @@ -4,21 +4,22 @@ import ( "fmt" "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" ) -// ShowNodeIDCmd dumps node's ID to the standard output. -var ShowNodeIDCmd = &cobra.Command{ - Use: "show-node-id", - Short: "Show this node's ID", - RunE: showNodeID, -} +// MakeShowNodeIDCommand constructs a command to dump the node ID to stdout. 
+func MakeShowNodeIDCommand(conf *config.Config) *cobra.Command { + return &cobra.Command{ + Use: "show-node-id", + Short: "Show this node's ID", + RunE: func(cmd *cobra.Command, args []string) error { + nodeKeyID, err := conf.LoadNodeKeyID() + if err != nil { + return err + } -func showNodeID(cmd *cobra.Command, args []string) error { - nodeKeyID, err := config.LoadNodeKeyID() - if err != nil { - return err + fmt.Println(nodeKeyID) + return nil + }, } - - fmt.Println(nodeKeyID) - return nil } diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index 47b372c613..a0e8d75666 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -6,74 +6,78 @@ import ( "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" ) -// ShowValidatorCmd adds capabilities for showing the validator info. -var ShowValidatorCmd = &cobra.Command{ - Use: "show-validator", - Short: "Show this node's validator info", - RunE: showValidator, -} - -func showValidator(cmd *cobra.Command, args []string) error { - var ( - pubKey crypto.PubKey - err error - ) +// MakeShowValidatorCommand constructs a command to show the validator info. 
+func MakeShowValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "show-validator", + Short: "Show this node's validator info", + RunE: func(cmd *cobra.Command, args []string) error { + var ( + pubKey crypto.PubKey + err error + bctx = cmd.Context() + ) + //TODO: remove once gRPC is the only supported protocol + protocol, _ := tmnet.ProtocolAndAddress(conf.PrivValidator.ListenAddr) + switch protocol { + case "grpc": + pvsc, err := tmgrpc.DialRemoteSigner( + bctx, + conf.PrivValidator, + conf.ChainID(), + logger, + conf.Instrumentation.Prometheus, + ) + if err != nil { + return fmt.Errorf("can't connect to remote validator %w", err) + } - //TODO: remove once gRPC is the only supported protocol - protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr) - switch protocol { - case "grpc": - pvsc, err := tmgrpc.DialRemoteSigner( - config.PrivValidator, - config.ChainID(), - logger, - config.Instrumentation.Prometheus, - ) - if err != nil { - return fmt.Errorf("can't connect to remote validator %w", err) - } + ctx, cancel := context.WithTimeout(bctx, ctxTimeout) + defer cancel() - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) - defer cancel() + pubKey, err = pvsc.GetPubKey(ctx) + if err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + default: - pubKey, err = pvsc.GetPubKey(ctx) - if err != nil { - return fmt.Errorf("can't get pubkey: %w", err) - } - default: + keyFilePath := conf.PrivValidator.KeyFile() + if !tmos.FileExists(keyFilePath) { + return fmt.Errorf("private validator file %s does not exist", keyFilePath) + } - keyFilePath := config.PrivValidator.KeyFile() - if !tmos.FileExists(keyFilePath) { - return fmt.Errorf("private validator file %s does not exist", keyFilePath) - } + pv, err := privval.LoadFilePV(keyFilePath, conf.PrivValidator.StateFile()) + if err != nil { + return err + } - pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidator.StateFile()) 
- if err != nil { - return err - } + ctx, cancel := context.WithTimeout(bctx, ctxTimeout) + defer cancel() - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) - defer cancel() + pubKey, err = pv.GetPubKey(ctx) + if err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + } - pubKey, err = pv.GetPubKey(ctx) - if err != nil { - return fmt.Errorf("can't get pubkey: %w", err) - } - } + bz, err := jsontypes.Marshal(pubKey) + if err != nil { + return fmt.Errorf("failed to marshal private validator pubkey: %w", err) + } - bz, err := tmjson.Marshal(pubKey) - if err != nil { - return fmt.Errorf("failed to marshal private validator pubkey: %w", err) + fmt.Println(string(bz)) + return nil + }, } - fmt.Println(string(bz)) - return nil } diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index ef46f54280..82954e41b6 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -13,283 +13,319 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" ) -var ( - nValidators int - nNonValidators int - initialHeight int64 - configFile string - outputDir string - nodeDirPrefix string - - populatePersistentPeers bool - hostnamePrefix string - hostnameSuffix string - startingIPAddress string - hostnames []string - p2pPort int - randomMonikers bool -) - const ( nodeDirPerm = 0755 ) -func init() { - TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4, +// MakeTestnetFilesCommand constructs a command to generate testnet config files. 
+func MakeTestnetFilesCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "testnet", + Short: "Initialize files for a Tendermint testnet", + Long: `testnet will create "v" + "n" number of directories and populate each with +necessary files (private validator, genesis, config, etc.). + +Note, strict routability for addresses is turned off in the config file. + +Optionally, it will fill in persistent-peers list in config file using either hostnames or IPs. + +Example: + + tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2 + `, + } + var ( + nValidators int + nNonValidators int + initialHeight int64 + configFile string + outputDir string + nodeDirPrefix string + + populatePersistentPeers bool + hostnamePrefix string + hostnameSuffix string + startingIPAddress string + hostnames []string + p2pPort int + randomMonikers bool + keyType string + ) + + cmd.Flags().IntVar(&nValidators, "v", 4, "number of validators to initialize the testnet with") - TestnetFilesCmd.Flags().StringVar(&configFile, "config", "", + cmd.Flags().StringVar(&configFile, "config", "", "config file to use (note some options may be overwritten)") - TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0, + cmd.Flags().IntVar(&nNonValidators, "n", 0, "number of non-validators to initialize the testnet with") - TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet", + cmd.Flags().StringVar(&outputDir, "o", "./mytestnet", "directory to store initialization data for the testnet") - TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", + cmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", "prefix the directory name for each node with (node results in node0, node1, ...)") - TestnetFilesCmd.Flags().Int64Var(&initialHeight, "initial-height", 0, + cmd.Flags().Int64Var(&initialHeight, "initial-height", 0, "initial height of the first block") - 
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, + cmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, "update config of each node with the list of persistent peers build using either"+ " hostname-prefix or"+ " starting-ip-address") - TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", + cmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", "hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") - TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "", + cmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "", "hostname suffix ("+ "\".xyz.com\""+ " results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)") - TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", + cmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", "starting IP address ("+ "\"192.168.0.1\""+ " results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") - TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{}, + cmd.Flags().StringArrayVar(&hostnames, "hostname", []string{}, "manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)") - TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656, + cmd.Flags().IntVar(&p2pPort, "p2p-port", 26656, "P2P Port") - TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, + cmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, "randomize the moniker for each generated node") - TestnetFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. Options: ed25519, secp256k1") -} -// TestnetFilesCmd allows initialisation of files for a Tendermint testnet. 
-var TestnetFilesCmd = &cobra.Command{ - Use: "testnet", - Short: "Initialize files for a Tendermint testnet", - Long: `testnet will create "v" + "n" number of directories and populate each with -necessary files (private validator, genesis, config, etc.). - -Note, strict routability for addresses is turned off in the config file. - -Optionally, it will fill in persistent-peers list in config file using either hostnames or IPs. - -Example: - - tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2 - `, - RunE: testnetFiles, -} - -func testnetFiles(cmd *cobra.Command, args []string) error { - if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { - return fmt.Errorf( - "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", - nValidators+nNonValidators, - ) - } + cmd.RunE = func(cmd *cobra.Command, args []string) error { + if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { + return fmt.Errorf( + "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", + nValidators+nNonValidators, + ) + } - // set mode to validator for testnet - config := cfg.DefaultValidatorConfig() + // set mode to validator for testnet + config := cfg.DefaultValidatorConfig() - // overwrite default config if set and valid - if configFile != "" { - viper.SetConfigFile(configFile) - if err := viper.ReadInConfig(); err != nil { - return err - } - if err := viper.Unmarshal(config); err != nil { - return err - } - if err := config.ValidateBasic(); err != nil { - return err + // overwrite default config if set and valid + if configFile != "" { + viper.SetConfigFile(configFile) + if err := viper.ReadInConfig(); err != nil { + return err + } + if err := viper.Unmarshal(config); err != nil { + return err + } + if err := config.ValidateBasic(); err != nil { + return err + } } - } - genVals := 
make([]types.GenesisValidator, nValidators) + genVals := make([]types.GenesisValidator, nValidators) + ctx := cmd.Context() + for i := 0; i < nValidators; i++ { + nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i) + nodeDir := filepath.Join(outputDir, nodeDirName) + config.SetRoot(nodeDir) + + err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } + err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } - for i := 0; i < nValidators; i++ { - nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i) - nodeDir := filepath.Join(outputDir, nodeDirName) - config.SetRoot(nodeDir) + if err := initFilesWithConfig(ctx, config, logger); err != nil { + return err + } - err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } - err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } + pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key) + pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State) + pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile) + if err != nil { + return err + } - if err := initFilesWithConfig(config); err != nil { - return err - } + ctx, cancel := context.WithTimeout(ctx, ctxTimeout) + defer cancel() - pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key) - pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State) - pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile) - if err != nil { - return err + pubKey, err := pv.GetPubKey(ctx) + if err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + genVals[i] = types.GenesisValidator{ + Address: pubKey.Address(), + PubKey: pubKey, + Power: 1, + Name: nodeDirName, + } } - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) - defer cancel() + for i := 0; i < 
nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators)) + config.SetRoot(nodeDir) - pubKey, err := pv.GetPubKey(ctx) - if err != nil { - return fmt.Errorf("can't get pubkey: %w", err) - } - genVals[i] = types.GenesisValidator{ - Address: pubKey.Address(), - PubKey: pubKey, - Power: 1, - Name: nodeDirName, - } - } + err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } - for i := 0; i < nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators)) - config.SetRoot(nodeDir) + err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } - err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err + if err := initFilesWithConfig(ctx, conf, logger); err != nil { + return err + } } - err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err + // Generate genesis doc from generated validators + genDoc := &types.GenesisDoc{ + ChainID: "chain-" + tmrand.Str(6), + GenesisTime: tmtime.Now(), + InitialHeight: initialHeight, + Validators: genVals, + ConsensusParams: types.DefaultConsensusParams(), } - - if err := initFilesWithConfig(config); err != nil { - return err + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = types.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } } - } - // Generate genesis doc from generated validators - genDoc := &types.GenesisDoc{ - ChainID: "chain-" + tmrand.Str(6), - GenesisTime: tmtime.Now(), - InitialHeight: initialHeight, - Validators: genVals, - ConsensusParams: types.DefaultConsensusParams(), - } - if keyType == "secp256k1" { - genDoc.ConsensusParams.Validator = types.ValidatorParams{ - PubKeyTypes: 
[]string{types.ABCIPubKeyTypeSecp256k1}, + // Write genesis file. + for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) + if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { + _ = os.RemoveAll(outputDir) + return err + } } - } - // Write genesis file. - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) - if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { - _ = os.RemoveAll(outputDir) - return err + // Gather persistent peer addresses. + var ( + persistentPeers = make([]string, 0) + err error + ) + tpargs := testnetPeerArgs{ + numValidators: nValidators, + numNonValidators: nNonValidators, + peerToPeerPort: p2pPort, + nodeDirPrefix: nodeDirPrefix, + outputDir: outputDir, + hostnames: hostnames, + startingIPAddr: startingIPAddress, + hostnamePrefix: hostnamePrefix, + hostnameSuffix: hostnameSuffix, + randomMonikers: randomMonikers, } - } - // Gather persistent peer addresses. - var ( - persistentPeers = make([]string, 0) - err error - ) - if populatePersistentPeers { - persistentPeers, err = persistentPeersArray(config) - if err != nil { - _ = os.RemoveAll(outputDir) - return err + if populatePersistentPeers { + + persistentPeers, err = persistentPeersArray(config, tpargs) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } } - } - // Overwrite default config. - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) - config.SetRoot(nodeDir) - config.P2P.AllowDuplicateIP = true - if populatePersistentPeers { - persistentPeersWithoutSelf := make([]string, 0) - for j := 0; j < len(persistentPeers); j++ { - if j == i { - continue + // Overwrite default config. 
+ for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) + config.SetRoot(nodeDir) + config.P2P.AllowDuplicateIP = true + if populatePersistentPeers { + persistentPeersWithoutSelf := make([]string, 0) + for j := 0; j < len(persistentPeers); j++ { + if j == i { + continue + } + persistentPeersWithoutSelf = append(persistentPeersWithoutSelf, persistentPeers[j]) } - persistentPeersWithoutSelf = append(persistentPeersWithoutSelf, persistentPeers[j]) + config.P2P.PersistentPeers = strings.Join(persistentPeersWithoutSelf, ",") + } + config.Moniker = tpargs.moniker(i) + + if err := cfg.WriteConfigFile(nodeDir, config); err != nil { + return err } - config.P2P.PersistentPeers = strings.Join(persistentPeersWithoutSelf, ",") } - config.Moniker = moniker(i) - cfg.WriteConfigFile(nodeDir, config) + fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) + return nil } - fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) - return nil + return cmd +} + +type testnetPeerArgs struct { + numValidators int + numNonValidators int + peerToPeerPort int + nodeDirPrefix string + outputDir string + hostnames []string + startingIPAddr string + hostnamePrefix string + hostnameSuffix string + randomMonikers bool } -func hostnameOrIP(i int) string { - if len(hostnames) > 0 && i < len(hostnames) { - return hostnames[i] +func (args *testnetPeerArgs) hostnameOrIP(i int) (string, error) { + if len(args.hostnames) > 0 && i < len(args.hostnames) { + return args.hostnames[i], nil } - if startingIPAddress == "" { - return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + if args.startingIPAddr == "" { + return fmt.Sprintf("%s%d%s", args.hostnamePrefix, i, args.hostnameSuffix), nil } - ip := net.ParseIP(startingIPAddress) + ip := net.ParseIP(args.startingIPAddr) ip = ip.To4() if ip == nil { - fmt.Printf("%v: non ipv4 address\n", startingIPAddress) - 
os.Exit(1) + return "", fmt.Errorf("%v is non-ipv4 address", args.startingIPAddr) } for j := 0; j < i; j++ { ip[3]++ } - return ip.String() + return ip.String(), nil + } // get an array of persistent peers -func persistentPeersArray(config *cfg.Config) ([]string, error) { - peers := make([]string, nValidators+nNonValidators) - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) +func persistentPeersArray(config *cfg.Config, args testnetPeerArgs) ([]string, error) { + peers := make([]string, args.numValidators+args.numNonValidators) + for i := 0; i < len(peers); i++ { + nodeDir := filepath.Join(args.outputDir, fmt.Sprintf("%s%d", args.nodeDirPrefix, i)) config.SetRoot(nodeDir) nodeKey, err := config.LoadNodeKeyID() if err != nil { - return []string{}, err + return nil, err } - peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) + addr, err := args.hostnameOrIP(i) + if err != nil { + return nil, err + } + + peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", addr, args.peerToPeerPort)) } return peers, nil } -func moniker(i int) string { - if randomMonikers { +func (args *testnetPeerArgs) moniker(i int) string { + if args.randomMonikers { return randomMoniker() } - if len(hostnames) > 0 && i < len(hostnames) { - return hostnames[i] + if len(args.hostnames) > 0 && i < len(args.hostnames) { + return args.hostnames[i] } - if startingIPAddress == "" { - return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + if args.startingIPAddr == "" { + return fmt.Sprintf("%s%d%s", args.hostnamePrefix, i, args.hostnameSuffix) } return randomMoniker() } diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 52a00e4c06..6d2391dde2 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -1,38 +1,50 @@ package main import ( - "os" - "path/filepath" + "context" - cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" + 
"github.com/tendermint/tendermint/cmd/tendermint/commands" "github.com/tendermint/tendermint/cmd/tendermint/commands/debug" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/node" ) func main() { - rootCmd := cmd.RootCmd - rootCmd.AddCommand( - cmd.GenValidatorCmd, - cmd.ReIndexEventCmd, - cmd.InitFilesCmd, - cmd.ProbeUpnpCmd, - cmd.LightCmd, - cmd.ReplayCmd, - cmd.ReplayConsoleCmd, - cmd.ResetAllCmd, - cmd.ResetPrivValidatorCmd, - cmd.ShowValidatorCmd, - cmd.TestnetFilesCmd, - cmd.ShowNodeIDCmd, - cmd.GenNodeKeyCmd, - cmd.VersionCmd, - cmd.InspectCmd, - cmd.RollbackStateCmd, - cmd.MakeKeyMigrateCommand(), + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + conf, err := commands.ParseConfig(config.DefaultConfig()) + if err != nil { + panic(err) + } + + logger, err := log.NewDefaultLogger(conf.LogFormat, conf.LogLevel) + if err != nil { + panic(err) + } + + rcmd := commands.RootCommand(conf, logger) + rcmd.AddCommand( + commands.MakeGenValidatorCommand(), + commands.MakeReindexEventCommand(conf, logger), + commands.MakeInitFilesCommand(conf, logger), + commands.MakeLightCommand(conf, logger), + commands.MakeReplayCommand(conf, logger), + commands.MakeReplayConsoleCommand(conf, logger), + commands.MakeResetAllCommand(conf, logger), + commands.MakeResetPrivateValidatorCommand(conf, logger), + commands.MakeShowValidatorCommand(conf, logger), + commands.MakeTestnetFilesCommand(conf, logger), + commands.MakeShowNodeIDCommand(conf), + commands.GenNodeKeyCmd, + commands.VersionCmd, + commands.MakeInspectCommand(conf, logger), + commands.MakeRollbackStateCommand(conf), + commands.MakeKeyMigrateCommand(conf, logger), debug.DebugCmd, - cli.NewCompletionCmd(rootCmd, true), + cli.NewCompletionCmd(rcmd, true), ) // NOTE: @@ -46,10 +58,9 @@ func main() { nodeFunc := node.NewDefault // Create & start node - 
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) + rcmd.AddCommand(commands.NewRunNodeCmd(nodeFunc, conf, logger)) - cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir))) - if err := cmd.Execute(); err != nil { + if err := rcmd.ExecuteContext(ctx); err != nil { panic(err) } } diff --git a/config/config.go b/config/config.go index a9b2576fd5..3758c92413 100644 --- a/config/config.go +++ b/config/config.go @@ -2,15 +2,14 @@ package config import ( "encoding/hex" + "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" "time" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/types" @@ -28,11 +27,6 @@ const ( ModeFull = "full" ModeValidator = "validator" ModeSeed = "seed" - - BlockSyncV0 = "v0" - - MempoolV0 = "v0" - MempoolV1 = "v1" ) // NOTE: Most of the structs & relevant comments + the @@ -73,7 +67,6 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` - BlockSync *BlockSyncConfig `mapstructure:"blocksync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` @@ -88,7 +81,6 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), StateSync: DefaultStateSyncConfig(), - BlockSync: DefaultBlockSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), Instrumentation: DefaultInstrumentationConfig(), @@ -111,7 +103,6 @@ func TestConfig() *Config { P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), StateSync: TestStateSyncConfig(), - BlockSync: TestBlockSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), Instrumentation: TestInstrumentationConfig(), @@ 
-145,9 +136,6 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.StateSync.ValidateBasic(); err != nil { return fmt.Errorf("error in [statesync] section: %w", err) } - if err := cfg.BlockSync.ValidateBasic(); err != nil { - return fmt.Errorf("error in [blocksync] section: %w", err) - } if err := cfg.Consensus.ValidateBasic(); err != nil { return fmt.Errorf("error in [consensus] section: %w", err) } @@ -277,12 +265,12 @@ func (cfg BaseConfig) NodeKeyFile() string { // LoadNodeKey loads NodeKey located in filePath. func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) { - jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile()) + jsonBytes, err := os.ReadFile(cfg.NodeKeyFile()) if err != nil { return "", err } nodeKey := types.NodeKey{} - err = tmjson.Unmarshal(jsonBytes, &nodeKey) + err = json.Unmarshal(jsonBytes, &nodeKey) if err != nil { return "", err } @@ -333,28 +321,6 @@ func (cfg BaseConfig) ValidateBasic() error { return fmt.Errorf("unknown mode: %v", cfg.Mode) } - // TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle. - // This check was added to give users an upgrade prompt to use the new - // configuration option in v0.35. In future release cycles they should no longer - // be using this configuration parameter so the check can be removed. - // The cfg.Other field can likely be removed at the same time if it is not referenced - // elsewhere as it was added to service this check. - if fs, ok := cfg.Other["fastsync"]; ok { - if _, ok := fs.(map[string]interface{}); ok { - return fmt.Errorf("a configuration section named 'fastsync' was found in the " + - "configuration file. The 'fastsync' section has been renamed to " + - "'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'") - } - } - if fs, ok := cfg.Other["fast-sync"]; ok { - if fs != "" { - return fmt.Errorf("a parameter named 'fast-sync' was found in the " + - "configuration file. 
The parameter to enable or disable quickly syncing with a blockchain" + - "has moved to the [blocksync] section of the configuration file as blocksync.enable. " + - "Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'") - } - } - return nil } @@ -605,9 +571,11 @@ type P2PConfig struct { //nolint: maligned // Comma separated list of seed nodes to connect to // We only use these if we can’t connect to peers in the addrbook - // NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. - // TODO: Remove once p2p refactor is complete - // ref: https://github.com/tendermint/tendermint/issues/5670 + // + // Deprecated: This value is not used by the new PEX reactor. Use + // BootstrapPeers instead. + // + // TODO(#5670): Remove once the p2p refactor is complete. Seeds string `mapstructure:"seeds"` // Comma separated list of peers to be added to the peer store @@ -639,6 +607,18 @@ type P2PConfig struct { //nolint: maligned // Toggle to disable guard against peers connecting from the same ip. AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` + // Time to wait before flushing messages out on the connection + FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` + + // Maximum size of a message packet payload, in bytes + MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"` + + // Rate at which packets can be sent, in bytes/second + SendRate int64 `mapstructure:"send-rate"` + + // Rate at which packets can be received, in bytes/second + RecvRate int64 `mapstructure:"recv-rate"` + // Peer connection configuration. 
HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` @@ -661,20 +641,48 @@ func DefaultP2PConfig() *P2PConfig { UPNP: false, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PexReactor: true, - AllowDuplicateIP: false, - HandshakeTimeout: 20 * time.Second, - DialTimeout: 3 * time.Second, - TestDialFail: false, - QueueType: "priority", + FlushThrottleTimeout: 100 * time.Millisecond, + // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. + // The IP header and the TCP header take up 20 bytes each at least (unless + // optional header fields are used) and thus the max for (non-Jumbo frame) + // Ethernet is 1500 - 20 -20 = 1460 + // Source: https://stackoverflow.com/a/3074427/820520 + MaxPacketMsgPayloadSize: 1400, + SendRate: 5120000, // 5 mB/s + RecvRate: 5120000, // 5 mB/s + PexReactor: true, + AllowDuplicateIP: false, + HandshakeTimeout: 20 * time.Second, + DialTimeout: 3 * time.Second, + TestDialFail: false, + QueueType: "priority", } } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *P2PConfig) ValidateBasic() error { + if cfg.FlushThrottleTimeout < 0 { + return errors.New("flush-throttle-timeout can't be negative") + } + if cfg.MaxPacketMsgPayloadSize < 0 { + return errors.New("max-packet-msg-payload-size can't be negative") + } + if cfg.SendRate < 0 { + return errors.New("send-rate can't be negative") + } + if cfg.RecvRate < 0 { + return errors.New("recv-rate can't be negative") + } + return nil +} + // TestP2PConfig returns a configuration for testing the peer-to-peer layer func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" cfg.AllowDuplicateIP = true + cfg.FlushThrottleTimeout = 10 * time.Millisecond return cfg } @@ -683,7 +691,6 @@ func TestP2PConfig() *P2PConfig { // MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { - Version string `mapstructure:"version"` RootDir string `mapstructure:"home"` Recheck bool `mapstructure:"recheck"` Broadcast bool `mapstructure:"broadcast"` @@ -733,7 +740,6 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Version: MempoolV1, Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement @@ -902,31 +908,6 @@ func (cfg *StateSyncConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- - -// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service -// If this node is many blocks behind the tip of the chain, BlockSync -// allows them to catchup quickly by downloading blocks in parallel -// and verifying their commits. 
-type BlockSyncConfig struct { - Enable bool `mapstructure:"enable"` -} - -// DefaultBlockSyncConfig returns a default configuration for the block sync service -func DefaultBlockSyncConfig() *BlockSyncConfig { - return &BlockSyncConfig{ - Enable: true, - } -} - -// TestBlockSyncConfig returns a default configuration for the block sync. -func TestBlockSyncConfig() *BlockSyncConfig { - return DefaultBlockSyncConfig() -} - -// ValidateBasic performs basic validation. -func (cfg *BlockSyncConfig) ValidateBasic() error { return nil } - //----------------------------------------------------------------------------- // ConsensusConfig diff --git a/config/config_test.go b/config/config_test.go index 1813144924..d768a17029 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -10,21 +10,19 @@ import ( ) func TestDefaultConfig(t *testing.T) { - assert := assert.New(t) - // set up some defaults cfg := DefaultConfig() - assert.NotNil(cfg.P2P) - assert.NotNil(cfg.Mempool) - assert.NotNil(cfg.Consensus) + assert.NotNil(t, cfg.P2P) + assert.NotNil(t, cfg.Mempool) + assert.NotNil(t, cfg.Consensus) // check the root dir stuff... 
cfg.SetRoot("/foo") cfg.Genesis = "bar" cfg.DBPath = "/opt/data" - assert.Equal("/foo/bar", cfg.GenesisFile()) - assert.Equal("/opt/data", cfg.DBDir()) + assert.Equal(t, "/foo/bar", cfg.GenesisFile()) + assert.Equal(t, "/opt/data", cfg.DBDir()) } func TestConfigValidateBasic(t *testing.T) { @@ -37,19 +35,18 @@ func TestConfigValidateBasic(t *testing.T) { } func TestTLSConfiguration(t *testing.T) { - assert := assert.New(t) cfg := DefaultConfig() cfg.SetRoot("/home/user") cfg.RPC.TLSCertFile = "file.crt" - assert.Equal("/home/user/config/file.crt", cfg.RPC.CertFile()) + assert.Equal(t, "/home/user/config/file.crt", cfg.RPC.CertFile()) cfg.RPC.TLSKeyFile = "file.key" - assert.Equal("/home/user/config/file.key", cfg.RPC.KeyFile()) + assert.Equal(t, "/home/user/config/file.key", cfg.RPC.KeyFile()) cfg.RPC.TLSCertFile = "/abs/path/to/file.crt" - assert.Equal("/abs/path/to/file.crt", cfg.RPC.CertFile()) + assert.Equal(t, "/abs/path/to/file.crt", cfg.RPC.CertFile()) cfg.RPC.TLSKeyFile = "/abs/path/to/file.key" - assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile()) + assert.Equal(t, "/abs/path/to/file.key", cfg.RPC.KeyFile()) } func TestBaseConfigValidateBasic(t *testing.T) { @@ -104,13 +101,7 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { require.NoError(t, cfg.ValidateBasic()) } -func TestBlockSyncConfigValidateBasic(t *testing.T) { - cfg := TestBlockSyncConfig() - assert.NoError(t, cfg.ValidateBasic()) -} - func TestConsensusConfig_ValidateBasic(t *testing.T) { - // nolint: lll testcases := map[string]struct { modify func(*ConsensusConfig) expectErr bool @@ -159,3 +150,21 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg.MaxOpenConnections = -1 assert.Error(t, cfg.ValidateBasic()) } + +func TestP2PConfigValidateBasic(t *testing.T) { + cfg := TestP2PConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "FlushThrottleTimeout", + "MaxPacketMsgPayloadSize", + "SendRate", + "RecvRate", + } + + for _, fieldName := 
range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} diff --git a/config/db.go b/config/db.go index 8f489a87aa..f508354e07 100644 --- a/config/db.go +++ b/config/db.go @@ -1,6 +1,8 @@ package config import ( + "context" + dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" @@ -8,7 +10,7 @@ import ( ) // ServiceProvider takes a config and a logger and returns a ready to go Node. -type ServiceProvider func(*Config, log.Logger) (service.Service, error) +type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error) // DBContext specifies config information for loading a new DB. type DBContext struct { diff --git a/config/toml.go b/config/toml.go index 3be385060c..6653007278 100644 --- a/config/toml.go +++ b/config/toml.go @@ -3,13 +3,13 @@ package config import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "strings" "text/template" tmos "github.com/tendermint/tendermint/libs/os" + tmrand "github.com/tendermint/tendermint/libs/rand" ) // DefaultDirPerm is the default permissions used when creating directories. @@ -45,23 +45,29 @@ func EnsureRoot(rootDir string) { // WriteConfigFile renders config using the template and writes it to configFilePath. // This function is called by cmd/tendermint/commands/init.go -func WriteConfigFile(rootDir string, config *Config) { +func WriteConfigFile(rootDir string, config *Config) error { + return config.WriteToTemplate(filepath.Join(rootDir, defaultConfigFilePath)) +} + +// WriteToTemplate writes the config to the exact file specified by +// the path, in the default toml template and does not mangle the path +// or filename at all. 
+func (cfg *Config) WriteToTemplate(path string) error { var buffer bytes.Buffer - if err := configTemplate.Execute(&buffer, config); err != nil { - panic(err) + if err := configTemplate.Execute(&buffer, cfg); err != nil { + return err } - configFilePath := filepath.Join(rootDir, defaultConfigFilePath) - - mustWriteFile(configFilePath, buffer.Bytes(), 0644) + return writeFile(path, buffer.Bytes(), 0644) } -func writeDefaultConfigFileIfNone(rootDir string) { +func writeDefaultConfigFileIfNone(rootDir string) error { configFilePath := filepath.Join(rootDir, defaultConfigFilePath) if !tmos.FileExists(configFilePath) { - WriteConfigFile(rootDir, DefaultConfig()) + return WriteConfigFile(rootDir, DefaultConfig()) } + return nil } // Note: any changes to the comments/variables/mapstructure @@ -300,16 +306,28 @@ allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} handshake-timeout = "{{ .P2P.HandshakeTimeout }}" dial-timeout = "{{ .P2P.DialTimeout }}" +# Time to wait before flushing messages out on the connection +# TODO: Remove once MConnConnection is removed. +flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" + +# Maximum size of a message packet payload, in bytes +# TODO: Remove once MConnConnection is removed. +max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} + +# Rate at which packets can be sent, in bytes/second +# TODO: Remove once MConnConnection is removed. +send-rate = {{ .P2P.SendRate }} + +# Rate at which packets can be received, in bytes/second +# TODO: Remove once MConnConnection is removed. +recv-rate = {{ .P2P.RecvRate }} + + ####################################################### ### Mempool Configuration Option ### ####################################################### [mempool] -# Mempool version to use: -# 1) "v0" - The legacy non-prioritized mempool reactor. -# 2) "v1" (default) - The prioritized mempool reactor. 
-version = "{{ .Mempool.Version }}" - recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} @@ -398,16 +416,6 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}" # The number of concurrent chunk and block fetchers to run (default: 4). fetchers = "{{ .StateSync.Fetchers }}" -####################################################### -### Block Sync Configuration Connections ### -####################################################### -[blocksync] - -# If this node is many blocks behind the tip of the chain, BlockSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -enable = {{ .BlockSync.Enable }} - ####################################################### ### Consensus Configuration Options ### ####################################################### @@ -496,22 +504,22 @@ namespace = "{{ .Instrumentation.Namespace }}" /****** these are for test settings ***********/ -func ResetTestRoot(testName string) *Config { +func ResetTestRoot(testName string) (*Config, error) { return ResetTestRootWithChainID(testName, "") } -func ResetTestRootWithChainID(testName string, chainID string) *Config { +func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) { // create a unique, concurrency-safe test directory under os.TempDir() - rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName)) + rootDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { - panic(err) + return nil, err } // ensure config and data subdirs are created if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil { - panic(err) + return nil, err } if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil { - panic(err) + return nil, err } conf := DefaultConfig() @@ -520,26 +528,37 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config { privStateFilePath := 
filepath.Join(rootDir, conf.PrivValidator.State) // Write default config file if missing. - writeDefaultConfigFileIfNone(rootDir) + if err := writeDefaultConfigFileIfNone(rootDir); err != nil { + return nil, err + } + if !tmos.FileExists(genesisFilePath) { if chainID == "" { chainID = "tendermint_test" } testGenesis := fmt.Sprintf(testGenesisFmt, chainID) - mustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + if err := writeFile(genesisFilePath, []byte(testGenesis), 0644); err != nil { + return nil, err + } } // we always overwrite the priv val - mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) - mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) + if err := writeFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644); err != nil { + return nil, err + } + if err := writeFile(privStateFilePath, []byte(testPrivValidatorState), 0644); err != nil { + return nil, err + } config := TestConfig().SetRoot(rootDir) - return config + config.Instrumentation.Namespace = fmt.Sprintf("%s_%s_%s", testName, chainID, tmrand.Str(16)) + return config, nil } -func mustWriteFile(filePath string, contents []byte, mode os.FileMode) { - if err := ioutil.WriteFile(filePath, contents, mode); err != nil { - tmos.Exit(fmt.Sprintf("failed to write file: %v", err)) +func writeFile(filePath string, contents []byte, mode os.FileMode) error { + if err := os.WriteFile(filePath, contents, mode); err != nil { + return fmt.Errorf("failed to write file: %w", err) } + return nil } var testGenesisFmt = `{ @@ -552,6 +571,10 @@ var testGenesisFmt = `{ "max_gas": "-1", "time_iota_ms": "10" }, + "synchrony": { + "message_delay": "500000000", + "precision": "10000000" + }, "evidence": { "max_age_num_blocks": "100000", "max_age_duration": "172800000000000", diff --git a/config/toml_test.go b/config/toml_test.go index ccf818d650..fa7e88da09 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" 
"path/filepath" "strings" @@ -15,26 +14,24 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) { for _, f := range files { p := rootify(rootDir, f) _, err := os.Stat(p) - assert.Nil(t, err, p) + assert.NoError(t, err, p) } } func TestEnsureRoot(t *testing.T) { - require := require.New(t) - // setup temp dir for test - tmpDir, err := ioutil.TempDir("", "config-test") - require.Nil(err) + tmpDir, err := os.MkdirTemp("", "config-test") + require.NoError(t, err) defer os.RemoveAll(tmpDir) // create root dir EnsureRoot(tmpDir) - WriteConfigFile(tmpDir, DefaultConfig()) + require.NoError(t, WriteConfigFile(tmpDir, DefaultConfig())) // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) - require.Nil(err) + data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) + require.NoError(t, err) checkConfig(t, string(data)) @@ -42,18 +39,17 @@ func TestEnsureRoot(t *testing.T) { } func TestEnsureTestRoot(t *testing.T) { - require := require.New(t) - testName := "ensureTestRoot" // create root dir - cfg := ResetTestRoot(testName) + cfg, err := ResetTestRoot(testName) + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) rootDir := cfg.RootDir // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) - require.Nil(err) + data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) + require.NoError(t, err) checkConfig(t, string(data)) @@ -70,7 +66,6 @@ func checkConfig(t *testing.T, configFile string) { "moniker", "seeds", "proxy-app", - "blocksync", "create-empty-blocks", "peer", "timeout", diff --git a/crypto/crypto.go b/crypto/crypto.go index 8d44b82f50..4f0dc05e75 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -2,6 +2,7 @@ package crypto import ( "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/libs/bytes" ) @@ -25,6 +26,9 
@@ type PubKey interface { VerifySignature(msg []byte, sig []byte) bool Equals(PubKey) bool Type() string + + // Implementations must support tagged encoding in JSON. + jsontypes.Tagged } type PrivKey interface { @@ -33,6 +37,9 @@ type PrivKey interface { PubKey() PubKey Equals(PrivKey) bool Type() string + + // Implementations must support tagged encoding in JSON. + jsontypes.Tagged } type Symmetric interface { diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index 3ac7f6d073..ffd4a3ed10 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" ) //------------------------------------- @@ -56,13 +56,16 @@ const ( ) func init() { - tmjson.RegisterType(PubKey{}, PubKeyName) - tmjson.RegisterType(PrivKey{}, PrivKeyName) + jsontypes.MustRegister(PubKey{}) + jsontypes.MustRegister(PrivKey{}) } // PrivKey implements crypto.PrivKey. type PrivKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PrivKey) TypeTag() string { return PrivKeyName } + // Bytes returns the privkey byte format. func (privKey PrivKey) Bytes() []byte { return []byte(privKey) @@ -151,6 +154,9 @@ var _ crypto.PubKey = PubKey{} // PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme. type PubKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PubKey) TypeTag() string { return PubKeyName } + // Address is the SHA256-20 of the raw pubkey bytes. 
func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go index 63c781a3b6..a6acafc580 100644 --- a/crypto/ed25519/ed25519_test.go +++ b/crypto/ed25519/ed25519_test.go @@ -17,7 +17,7 @@ func TestSignAndValidateEd25519(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.Sign(msg) - require.Nil(t, err) + require.NoError(t, err) // Test the signature assert.True(t, pubKey.VerifySignature(msg, sig)) diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go index 37249bcb32..fd32f101cb 100644 --- a/crypto/encoding/codec.go +++ b/crypto/encoding/codec.go @@ -7,14 +7,14 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" - "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) func init() { - json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey") - json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") - json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") + jsontypes.MustRegister((*cryptoproto.PublicKey)(nil)) + jsontypes.MustRegister((*cryptoproto.PublicKey_Ed25519)(nil)) + jsontypes.MustRegister((*cryptoproto.PublicKey_Secp256K1)(nil)) } // PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 80b289d231..4f09e44146 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -24,10 +24,10 @@ const ( // everything. This also affects the generalized proof system as // well. type Proof struct { - Total int64 `json:"total"` // Total number of items. - Index int64 `json:"index"` // Index of item to prove. 
- LeafHash []byte `json:"leaf_hash"` // Hash of item value. - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int64 `json:"total,string"` // Total number of items. + Index int64 `json:"index,string"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } // ProofsFromByteSlices computes inclusion proof for given items. diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go index 0cc947643f..13d26b3601 100644 --- a/crypto/merkle/proof_key_path_test.go +++ b/crypto/merkle/proof_key_path_test.go @@ -28,13 +28,13 @@ func TestKeyPath(t *testing.T) { case KeyEncodingHex: rand.Read(keys[i]) default: - panic("Unexpected encoding") + require.Fail(t, "Unexpected encoding") } path = path.AppendKey(keys[i], enc) } res, err := KeyPathToKeys(path.String()) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, len(keys), len(res)) for i, key := range keys { diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index f0d2f86896..05a5ca369a 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -79,58 +79,58 @@ func TestProofOperators(t *testing.T) { // Good popz := ProofOperators([]ProofOperator{op1, op2, op3, op4}) err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.Nil(t, err) + assert.NoError(t, err) err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1")) - assert.Nil(t, err) + assert.NoError(t, err) // BAD INPUT err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")}) - assert.NotNil(t, err) + assert.Error(t, err) err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG")) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 1 err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) 
// BAD KEY 2 err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 3 err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 4 err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 5 err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD OUTPUT 1 err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD OUTPUT 2 err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD POPZ 1 popz = []ProofOperator{op1, op2, op4} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD POPZ 2 popz = []ProofOperator{op4, op3, op2, op1} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD POPZ 3 popz = []ProofOperator{} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) } func bz(s string) []byte { diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index c2c0c60175..f212874c77 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -10,7 +10,7 @@ import ( secp256k1 "github.com/btcsuite/btcd/btcec" "github.com/tendermint/tendermint/crypto" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" // necessary for Bitcoin address format "golang.org/x/crypto/ripemd160" // nolint @@ -26,8 +26,8 @@ const ( ) func init() { - tmjson.RegisterType(PubKey{}, PubKeyName) - tmjson.RegisterType(PrivKey{}, PrivKeyName) + 
jsontypes.MustRegister(PubKey{}) + jsontypes.MustRegister(PrivKey{}) } var _ crypto.PrivKey = PrivKey{} @@ -35,6 +35,9 @@ var _ crypto.PrivKey = PrivKey{} // PrivKey implements PrivKey. type PrivKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PrivKey) TypeTag() string { return PrivKeyName } + // Bytes marshalls the private key using amino encoding. func (privKey PrivKey) Bytes() []byte { return []byte(privKey) @@ -138,6 +141,9 @@ const PubKeySize = 33 // This prefix is followed with the x-coordinate. type PubKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PubKey) TypeTag() string { return PubKeyName } + // Address returns a Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index 7a11092939..6cd53704c5 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -52,7 +52,7 @@ func TestSignAndValidateSecp256k1(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.Sign(msg) - require.Nil(t, err) + require.NoError(t, err) assert.True(t, pubKey.VerifySignature(msg, sig)) diff --git a/crypto/sr25519/encoding.go b/crypto/sr25519/encoding.go index c0a8a7925e..7ff110821a 100644 --- a/crypto/sr25519/encoding.go +++ b/crypto/sr25519/encoding.go @@ -1,6 +1,8 @@ package sr25519 -import tmjson "github.com/tendermint/tendermint/libs/json" +import ( + "github.com/tendermint/tendermint/internal/jsontypes" +) const ( PrivKeyName = "tendermint/PrivKeySr25519" @@ -8,6 +10,6 @@ const ( ) func init() { - tmjson.RegisterType(PubKey{}, PubKeyName) - tmjson.RegisterType(PrivKey{}, PrivKeyName) + jsontypes.MustRegister(PubKey{}) + jsontypes.MustRegister(PrivKey{}) } diff --git a/crypto/sr25519/privkey.go b/crypto/sr25519/privkey.go index f628ca1aab..4e9cc995f3 100644 --- a/crypto/sr25519/privkey.go +++ b/crypto/sr25519/privkey.go @@ -29,6 
+29,9 @@ type PrivKey struct { kp *sr25519.KeyPair } +// TypeTag satisfies the jsontypes.Tagged interface. +func (PrivKey) TypeTag() string { return PrivKeyName } + // Bytes returns the byte-encoded PrivKey. func (privKey PrivKey) Bytes() []byte { if privKey.kp == nil { diff --git a/crypto/sr25519/pubkey.go b/crypto/sr25519/pubkey.go index 7e701dd1f1..717f25c8c8 100644 --- a/crypto/sr25519/pubkey.go +++ b/crypto/sr25519/pubkey.go @@ -23,6 +23,9 @@ const ( // PubKey implements crypto.PubKey. type PubKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PubKey) TypeTag() string { return PubKeyName } + // Address is the SHA256-20 of the raw pubkey bytes. func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { diff --git a/crypto/sr25519/sr25519_test.go b/crypto/sr25519/sr25519_test.go index de5c125f47..84283eacaa 100644 --- a/crypto/sr25519/sr25519_test.go +++ b/crypto/sr25519/sr25519_test.go @@ -18,7 +18,7 @@ func TestSignAndValidateSr25519(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.Sign(msg) - require.Nil(t, err) + require.NoError(t, err) // Test the signature assert.True(t, pubKey.VerifySignature(msg, sig)) diff --git a/crypto/xchacha20poly1305/vector_test.go b/crypto/xchacha20poly1305/vector_test.go deleted file mode 100644 index c6ca9d8d23..0000000000 --- a/crypto/xchacha20poly1305/vector_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package xchacha20poly1305 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -func toHex(bits []byte) string { - return hex.EncodeToString(bits) -} - -func fromHex(bits string) []byte { - b, err := hex.DecodeString(bits) - if err != nil { - panic(err) - } - return b -} - -func TestHChaCha20(t *testing.T) { - for i, v := range hChaCha20Vectors { - var key [32]byte - var nonce [16]byte - copy(key[:], v.key) - copy(nonce[:], v.nonce) - - HChaCha20(&key, &nonce, &key) - if !bytes.Equal(key[:], v.keystream) { - t.Errorf("test %d: keystream mismatch:\n \t got: %s\n \t want: 
%s", i, toHex(key[:]), toHex(v.keystream)) - } - } -} - -var hChaCha20Vectors = []struct { - key, nonce, keystream []byte -}{ - { - fromHex("0000000000000000000000000000000000000000000000000000000000000000"), - fromHex("000000000000000000000000000000000000000000000000"), - fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"), - }, - { - fromHex("8000000000000000000000000000000000000000000000000000000000000000"), - fromHex("000000000000000000000000000000000000000000000000"), - fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"), - }, - { - fromHex("0000000000000000000000000000000000000000000000000000000000000001"), - fromHex("000000000000000000000000000000000000000000000002"), - fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"), - }, - { - fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"), - fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"), - fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"), - }, - { - fromHex("24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"), - fromHex("d9660c5900ae19ddad28d6e06e45fe5e"), - fromHex("5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"), - }, -} - -func TestVectors(t *testing.T) { - for i, v := range vectors { - if len(v.plaintext) == 0 { - v.plaintext = make([]byte, len(v.ciphertext)) - } - - var nonce [24]byte - copy(nonce[:], v.nonce) - - aead, err := New(v.key) - if err != nil { - t.Error(err) - } - - dst := aead.Seal(nil, nonce[:], v.plaintext, v.ad) - if !bytes.Equal(dst, v.ciphertext) { - t.Errorf("test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext)) - } - open, err := aead.Open(nil, nonce[:], dst, v.ad) - if err != nil { - t.Error(err) - } - if !bytes.Equal(open, v.plaintext) { - t.Errorf("test %d: plaintext mismatch:\n \t got: %s\n \t want: %s", i, string(open), string(v.plaintext)) - } - } -} - -var vectors = 
[]struct { - key, nonce, ad, plaintext, ciphertext []byte -}{ - { - []byte{ - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, - 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, - 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - }, - []byte{0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b}, - []byte{0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7}, - []byte( - "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.", - ), - []byte{ - 0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56, - 0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49, - 0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8, - 0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2, - 0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6, - 0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5, - 0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32, - 0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14, - 0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62, - 0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b, - 0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab, - 0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea, - }, - }, -} diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go deleted file mode 100644 index 2578520a5a..0000000000 --- a/crypto/xchacha20poly1305/xchachapoly.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package xchacha20poly1305 creates an AEAD using hchacha, chacha, and poly1305 -// This allows for randomized nonces to be used in conjunction with chacha. 
-package xchacha20poly1305 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "fmt" - - "golang.org/x/crypto/chacha20poly1305" -) - -// Implements crypto.AEAD -type xchacha20poly1305 struct { - key [KeySize]byte -} - -const ( - // KeySize is the size of the key used by this AEAD, in bytes. - KeySize = 32 - // NonceSize is the size of the nonce used with this AEAD, in bytes. - NonceSize = 24 - // TagSize is the size added from poly1305 - TagSize = 16 - // MaxPlaintextSize is the max size that can be passed into a single call of Seal - MaxPlaintextSize = (1 << 38) - 64 - // MaxCiphertextSize is the max size that can be passed into a single call of Open, - // this differs from plaintext size due to the tag - MaxCiphertextSize = (1 << 38) - 48 - - // sigma are constants used in xchacha. - // Unrolled from a slice so that they can be inlined, as slices can't be constants. - sigma0 = uint32(0x61707865) - sigma1 = uint32(0x3320646e) - sigma2 = uint32(0x79622d32) - sigma3 = uint32(0x6b206574) -) - -// New returns a new xchachapoly1305 AEAD -func New(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, errors.New("xchacha20poly1305: bad key length") - } - ret := new(xchacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (c *xchacha20poly1305) NonceSize() int { - return NonceSize -} - -func (c *xchacha20poly1305) Overhead() int { - return TagSize -} - -func (c *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSize { - panic("xchacha20poly1305: bad nonce length passed to Seal") - } - - if uint64(len(plaintext)) > MaxPlaintextSize { - panic("xchacha20poly1305: plaintext too large") - } - - var subKey [KeySize]byte - var hNonce [16]byte - var subNonce [chacha20poly1305.NonceSize]byte - copy(hNonce[:], nonce[:16]) - - HChaCha20(&subKey, &hNonce, &c.key) - - // This can't error because we always provide a correctly sized key - chacha20poly1305, _ := 
chacha20poly1305.New(subKey[:]) - - copy(subNonce[4:], nonce[16:]) - - return chacha20poly1305.Seal(dst, subNonce[:], plaintext, additionalData) -} - -func (c *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, fmt.Errorf("xchacha20poly1305: bad nonce length passed to Open") - } - if uint64(len(ciphertext)) > MaxCiphertextSize { - return nil, fmt.Errorf("xchacha20poly1305: ciphertext too large") - } - var subKey [KeySize]byte - var hNonce [16]byte - var subNonce [chacha20poly1305.NonceSize]byte - copy(hNonce[:], nonce[:16]) - - HChaCha20(&subKey, &hNonce, &c.key) - - // This can't error because we always provide a correctly sized key - chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) - - copy(subNonce[4:], nonce[16:]) - - return chacha20poly1305.Open(dst, subNonce[:], ciphertext, additionalData) -} - -// HChaCha exported from -// https://github.com/aead/chacha20/blob/8b13a72661dae6e9e5dea04f344f0dc95ea29547/chacha/chacha_generic.go#L194 -// TODO: Add support for the different assembly instructions used there. - -// The MIT License (MIT) - -// Copyright (c) 2016 Andreas Auernhammer - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key. -// It can be used as a key-derivation-function (KDF). -func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20Generic(out, nonce, key) } - -func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) { - v00 := sigma0 - v01 := sigma1 - v02 := sigma2 - v03 := sigma3 - v04 := binary.LittleEndian.Uint32(key[0:]) - v05 := binary.LittleEndian.Uint32(key[4:]) - v06 := binary.LittleEndian.Uint32(key[8:]) - v07 := binary.LittleEndian.Uint32(key[12:]) - v08 := binary.LittleEndian.Uint32(key[16:]) - v09 := binary.LittleEndian.Uint32(key[20:]) - v10 := binary.LittleEndian.Uint32(key[24:]) - v11 := binary.LittleEndian.Uint32(key[28:]) - v12 := binary.LittleEndian.Uint32(nonce[0:]) - v13 := binary.LittleEndian.Uint32(nonce[4:]) - v14 := binary.LittleEndian.Uint32(nonce[8:]) - v15 := binary.LittleEndian.Uint32(nonce[12:]) - - for i := 0; i < 20; i += 2 { - v00 += v04 - v12 ^= v00 - v12 = (v12 << 16) | (v12 >> 16) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 12) | (v04 >> 20) - v00 += v04 - v12 ^= v00 - v12 = (v12 << 8) | (v12 >> 24) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 7) | (v04 >> 25) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 16) | (v13 >> 16) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 12) | (v05 >> 20) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 8) | (v13 >> 24) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 7) | (v05 >> 25) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 16) | (v14 >> 16) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 12) | (v06 >> 20) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 8) | (v14 >> 24) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 7) | (v06 >> 25) - v03 += v07 - v15 ^= v03 - v15 = 
(v15 << 16) | (v15 >> 16) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 12) | (v07 >> 20) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 8) | (v15 >> 24) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 7) | (v07 >> 25) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 16) | (v15 >> 16) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 12) | (v05 >> 20) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 8) | (v15 >> 24) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 7) | (v05 >> 25) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 16) | (v12 >> 16) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 12) | (v06 >> 20) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 8) | (v12 >> 24) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 7) | (v06 >> 25) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 16) | (v13 >> 16) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 12) | (v07 >> 20) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 8) | (v13 >> 24) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 7) | (v07 >> 25) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 16) | (v14 >> 16) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 12) | (v04 >> 20) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 8) | (v14 >> 24) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 7) | (v04 >> 25) - } - - binary.LittleEndian.PutUint32(out[0:], v00) - binary.LittleEndian.PutUint32(out[4:], v01) - binary.LittleEndian.PutUint32(out[8:], v02) - binary.LittleEndian.PutUint32(out[12:], v03) - binary.LittleEndian.PutUint32(out[16:], v12) - binary.LittleEndian.PutUint32(out[20:], v13) - binary.LittleEndian.PutUint32(out[24:], v14) - binary.LittleEndian.PutUint32(out[28:], v15) -} diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go deleted file mode 100644 index 75953d72d2..0000000000 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package xchacha20poly1305 - -import ( - "bytes" - crand "crypto/rand" - mrand "math/rand" - "testing" -) - -// The following test is taken from -// 
https://github.com/golang/crypto/blob/master/chacha20poly1305/chacha20poly1305_test.go#L69 -// It requires the below copyright notice, where "this source code" refers to the following function. -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found at the bottom of this file. -func TestRandom(t *testing.T) { - // Some random tests to verify Open(Seal) == Plaintext - for i := 0; i < 256; i++ { - var nonce [24]byte - var key [32]byte - - al := mrand.Intn(128) - pl := mrand.Intn(16384) - ad := make([]byte, al) - plaintext := make([]byte, pl) - _, err := crand.Read(key[:]) - if err != nil { - t.Errorf("error on read: %w", err) - } - _, err = crand.Read(nonce[:]) - if err != nil { - t.Errorf("error on read: %w", err) - } - _, err = crand.Read(ad) - if err != nil { - t.Errorf("error on read: %w", err) - } - _, err = crand.Read(plaintext) - if err != nil { - t.Errorf("error on read: %w", err) - } - - aead, err := New(key[:]) - if err != nil { - t.Fatal(err) - } - - ct := aead.Seal(nil, nonce[:], plaintext, ad) - - plaintext2, err := aead.Open(nil, nonce[:], ct, ad) - if err != nil { - t.Errorf("random #%d: Open failed", i) - continue - } - - if !bytes.Equal(plaintext, plaintext2) { - t.Errorf("random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) - continue - } - - if len(ad) > 0 { - alterAdIdx := mrand.Intn(len(ad)) - ad[alterAdIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering additional data", i) - } - ad[alterAdIdx] ^= 0x80 - } - - alterNonceIdx := mrand.Intn(aead.NonceSize()) - nonce[alterNonceIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering nonce", i) - } - nonce[alterNonceIdx] ^= 0x80 - - alterCtIdx := mrand.Intn(len(ct)) - ct[alterCtIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, 
ad); err == nil { - t.Errorf("random #%d: Open was successful after altering ciphertext", i) - } - ct[alterCtIdx] ^= 0x80 - } -} - -// AFOREMENTIONED LICENSE -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go deleted file mode 100644 index 74cb4b1033..0000000000 --- a/crypto/xsalsa20symmetric/symmetric.go +++ /dev/null @@ -1,54 +0,0 @@ -package xsalsa20symmetric - -import ( - "errors" - "fmt" - - "golang.org/x/crypto/nacl/secretbox" - - "github.com/tendermint/tendermint/crypto" -) - -// TODO, make this into a struct that implements crypto.Symmetric. - -const nonceLen = 24 -const secretLen = 32 - -// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) -// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. -func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { - if len(secret) != secretLen { - panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) - } - nonce := crypto.CRandBytes(nonceLen) - nonceArr := [nonceLen]byte{} - copy(nonceArr[:], nonce) - secretArr := [secretLen]byte{} - copy(secretArr[:], secret) - ciphertext = make([]byte, nonceLen+secretbox.Overhead+len(plaintext)) - copy(ciphertext, nonce) - secretbox.Seal(ciphertext[nonceLen:nonceLen], plaintext, &nonceArr, &secretArr) - return ciphertext -} - -// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) -// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. 
-func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { - if len(secret) != secretLen { - panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) - } - if len(ciphertext) <= secretbox.Overhead+nonceLen { - return nil, errors.New("ciphertext is too short") - } - nonce := ciphertext[:nonceLen] - nonceArr := [nonceLen]byte{} - copy(nonceArr[:], nonce) - secretArr := [secretLen]byte{} - copy(secretArr[:], secret) - plaintext = make([]byte, len(ciphertext)-nonceLen-secretbox.Overhead) - _, ok := secretbox.Open(plaintext[:0], ciphertext[nonceLen:], &nonceArr, &secretArr) - if !ok { - return nil, errors.New("ciphertext decryption failed") - } - return plaintext, nil -} diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go deleted file mode 100644 index 160d49a9ef..0000000000 --- a/crypto/xsalsa20symmetric/symmetric_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package xsalsa20symmetric - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "golang.org/x/crypto/bcrypt" - - "github.com/tendermint/tendermint/crypto" -) - -func TestSimple(t *testing.T) { - - plaintext := []byte("sometext") - secret := []byte("somesecretoflengththirtytwo===32") - ciphertext := EncryptSymmetric(plaintext, secret) - plaintext2, err := DecryptSymmetric(ciphertext, secret) - - require.Nil(t, err, "%+v", err) - assert.Equal(t, plaintext, plaintext2) -} - -func TestSimpleWithKDF(t *testing.T) { - - plaintext := []byte("sometext") - secretPass := []byte("somesecret") - secret, err := bcrypt.GenerateFromPassword(secretPass, 12) - if err != nil { - t.Error(err) - } - secret = crypto.Sha256(secret) - - ciphertext := EncryptSymmetric(plaintext, secret) - plaintext2, err := DecryptSymmetric(ciphertext, secret) - - require.Nil(t, err, "%+v", err) - assert.Equal(t, plaintext, plaintext2) -} diff --git a/docker-compose.yml b/docker-compose.yml index 
76d89a53cd..157b7c381a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -65,5 +65,5 @@ networks: ipam: driver: default config: - - - subnet: 192.167.10.0/16 \ No newline at end of file + - + subnet: 192.167.10.0/16 diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 4653e2c5b1..afde320920 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -22,10 +22,6 @@ module.exports = { index: "tendermint" }, versions: [ - { - "label": "v0.32", - "key": "v0.32" - }, { "label": "v0.33", "key": "v0.33" @@ -34,6 +30,10 @@ module.exports = { "label": "v0.34", "key": "v0.34" }, + { + "label": "v0.35", + "key": "v0.35" + }, { "label": "master", "key": "master" diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index c1ab1580ab..da06785d57 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -11,9 +11,9 @@ and other supported release branches. There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml) in the `tendermint/docs` repository that clones and builds the documentation -site from the contents of this `docs` directory, for `master` and for each -supported release branch. Under the hood, this workflow runs `make build-docs` -from the [Makefile](../Makefile#L214). +site from the contents of this `docs` directory, for `master` and for the +backport branch of each supported release. Under the hood, this workflow runs +`make build-docs` from the [Makefile](../Makefile#L214). The list of supported versions are defined in [`config.js`](./.vuepress/config.js), which defines the UI menu on the documentation site, and also in diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index cc0afdc556..27b58721b1 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -83,19 +83,19 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { if err != nil { return err } + + // Stop upon receiving SIGTERM or CTRL-C. 
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() + srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { + if err := srv.Start(ctx); err != nil { return err } - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - srv.Stop() - }) - - // Run forever. - select {} + // Run until shutdown. +<-ctx.Done() +srv.Wait() } ``` diff --git a/docs/architecture/README.md b/docs/architecture/README.md index f6c12996f8..6e2dd6a1c8 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -65,7 +65,9 @@ Note the context/background should be written in the present tense. - [ADR-059: Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md) - [ADR-062: P2P-Architecture](./adr-062-p2p-architecture.md) - [ADR-063: Privval-gRPC](./adr-063-privval-grpc.md) -- [ADR-066-E2E-Testing](./adr-066-e2e-testing.md) +- [ADR-066: E2E-Testing](./adr-066-e2e-testing.md) +- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md) + ### Accepted - [ADR-006: Trust-Metric](./adr-006-trust-metric.md) @@ -77,6 +79,7 @@ Note the context/background should be written in the present tense. - [ADR-065: Custom Event Indexing](./adr-065-custom-event-indexing.md) - [ADR-068: Reverse-Sync](./adr-068-reverse-sync.md) - [ADR-067: Mempool Refactor](./adr-067-mempool-refactor.md) +- [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md) ### Rejected @@ -99,4 +102,4 @@ Note the context/background should be written in the present tense. 
- [ADR-057: RPC](./adr-057-RPC.md) - [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md) - [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md) -- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md) +- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md) diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md index 6fa77a609b..608978207b 100644 --- a/docs/architecture/adr-006-trust-metric.md +++ b/docs/architecture/adr-006-trust-metric.md @@ -178,7 +178,7 @@ type TrustMetricStore struct { } // OnStart implements Service -func (tms *TrustMetricStore) OnStart() error {} +func (tms *TrustMetricStore) OnStart(context.Context) error { return nil } // OnStop implements Service func (tms *TrustMetricStore) OnStop() {} diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md index c234880057..9bb1c245d6 100644 --- a/docs/architecture/adr-071-proposer-based-timestamps.md +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -1,45 +1,13 @@ # ADR 71: Proposer-Based Timestamps -* [Changelog](#changelog) -* [Status](#status) -* [Context](#context) -* [Alternative Approaches](#alternative-approaches) - * [Remove timestamps altogether](#remove-timestamps-altogether) -* [Decision](#decision) -* [Detailed Design](#detailed-design) - * [Overview](#overview) - * [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp) - * [Saving the timestamp across heights](#saving-the-timestamp-across-heights) - * [Changes to `CommitSig`](#changes-to-commitsig) - * [Changes to `Commit`](#changes-to-commit) - * [Changes to `Vote` messages](#changes-to-vote-messages) - * [New consensus parameters](#new-consensus-parameters) - * [Changes to `Header`](#changes-to-header) - * [Changes to the block proposal step](#changes-to-the-block-proposal-step) - * 
[Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp) - * [Proposer selects block timestamp](#proposer-selects-block-timestamp) - * [Proposer waits](#proposer-waits) - * [Changes to the propose step timeout](#changes-to-the-propose-step-timeout) - * [Changes to validation rules](#changes-to-validation-rules) - * [Proposal timestamp validation](#proposal-timestamp-validation) - * [Block timestamp validation](#block-timestamp-validation) - * [Changes to the prevote step](#changes-to-the-prevote-step) - * [Changes to the precommit step](#changes-to-the-precommit-step) - * [Changes to locking a block](#changes-to-locking-a-block) - * [Remove voteTime Completely](#remove-votetime-completely) -* [Future Improvements](#future-improvements) -* [Consequences](#consequences) - * [Positive](#positive) - * [Neutral](#neutral) - * [Negative](#negative) -* [References](#references) - ## Changelog - July 15 2021: Created by @williambanfield - Aug 4 2021: Draft completed by @williambanfield - Aug 5 2021: Draft updated to include data structure changes by @williambanfield - Aug 20 2021: Language edits completed by @williambanfield + - Oct 25 2021: Update the ADR to match updated spec from @cason by @williambanfield + - Nov 10 2021: Additional language updates by @williambanfield per feedback from @cason ## Status @@ -68,7 +36,7 @@ However, their currently known Unix time may be greatly divergent from the block The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues. Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways: -1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block. +1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block instead of the `BFTTime`. 1. 
Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time. The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power. @@ -111,45 +79,9 @@ Implementing proposer-based timestamps will require a few changes to Tendermint These changes will be to the following components: * The `internal/consensus/` package. * The `state/` package. -* The `Vote`, `CommitSig`, `Commit` and `Header` types. +* The `Vote`, `CommitSig` and `Header` types. * The consensus parameters. -### Proposal Timestamp and Block Timestamp - -This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message. -The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses. - -The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31). -This timestamp is the current Unix time known to the proposer when sending the `Proposal` message. -This timestamp is not currently used as part of consensus. -The changes in this ADR will begin using the proposal message timestamp as part of consensus. -We will refer to this as the **proposal timestamp** throughout this design. - -The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338). -This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm. -It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block. -This field will continue to be used but the logic for creating and validating this timestamp will change. -We will refer to this as the **block timestamp** throughout this design. 
- -At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`. -The following image shows this relationship. -The rest of this document describes the code changes that will make this possible. - -![](./img/pbts-message.png) - -### Saving the timestamp across heights - -Currently, `BFTtime` uses `LastCommit` to construct the block timestamp. -The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`. -`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`. - -For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`. -Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`. -To enable this, we will add a `Timestamp` field to the `Commit` struct. -This field will be populated at each height with the proposal timestamp decided on at the previous height. -This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes. -Changes to the `CommitSig` and `Commit` struct are detailed below. - ### Changes to `CommitSig` The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp. @@ -167,32 +99,14 @@ type CommitSig struct { } ``` -### Changes to `Commit` - -The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp. -The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp. 
-With these timestamps removed, the commit time will instead be stored in the `Commit` struct. - -`Commit` will be updated as follows. - -```diff -type Commit struct { - Height int64 `json:"height"` - Round int32 `json:"round"` -++ Timestamp time.Time `json:"timestamp"` - BlockID BlockID `json:"block_id"` - Signatures []CommitSig `json:"signatures"` -} -``` - ### Changes to `Vote` messages `Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50). This struct currently contains a timestamp. This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator. For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field. -For prevotes, this field is unused. -Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` `CommitSig`. +For prevotes, this field is currently unused. +Proposer-based timestamps will use the timestamp that the proposer sets into the block and will therefore no longer require that a timestamp be included in the vote messages. This timestamp is therefore no longer useful and will be dropped. `Vote` will be updated as follows: @@ -250,58 +164,28 @@ type TimestampParams struct { } ``` -### Changes to `Header` +### Changes to the block proposal step -The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp. 
-This timestamp is set as the `BFTtime` derived from the block's `LastCommit.CommitSig` timestamps. -This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`. -This timestamp will therfore be identical in both the `Header` and the `LastCommit`. -To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`. +#### Proposer selects block timestamp -`Header` will be updated as follows: +Tendermint currently uses the `BFTTime` algorithm to produce the block's `Header.Timestamp`. +The [proposal logic](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/internal/state/state.go#L269) sets the weighted median of the times in the `LastCommit.CommitSigs` as the proposed block's `Header.Timestamp`. -```diff -type Header struct { - // basic block info - Version version.Consensus `json:"version"` - ChainID string `json:"chain_id"` - Height int64 `json:"height"` --- Time time.Time `json:"time"` -++ LastTimestamp time.Time `json:"last_timestamp"` - - // prev block info - LastBlockID BlockID `json:"last_block_id"` - - // hashes of block data - LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"` - DataHash tmbytes.HexBytes `json:"data_hash"` - - // hashes from the app output from the prev block - ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` - NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"` - ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` - AppHash tmbytes.HexBytes `json:"app_hash"` - - // root hash of all results from the txs from the previous block - LastResultsHash tmbytes.HexBytes `json:"last_results_hash"` - - // consensus info - EvidenceHash tmbytes.HexBytes `json:"evidence_hash"` - ProposerAddress Address `json:"proposer_address"` -} -``` +In proposer-based timestamps, the proposer will still set a timestamp into the 
`Header.Timestamp`. +The timestamp the proposer sets into the `Header` will change depending on if the block has previously received a [polka](https://github.com/tendermint/tendermint/blob/053651160f496bb44b107a434e3e6482530bb287/docs/introduction/what-is-tendermint.md#consensus-overview) or not. -### Changes to the block proposal step +#### Proposal of a block that has not previously received a polka -#### Proposer selects proposal timestamp +If a proposer is proposing a new block, then it will set the Unix time currently known to the proposer into the `Header.Timestamp` field. +The proposer will also set this same timestamp into the `Timestamp` field of the `Proposal` message that it issues. -The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message. -This satisfies the proposer-based timestamp specification and does not need to change. +#### Re-proposal of a block that has previously received a polka -#### Proposer selects block timestamp +If a proposer is re-proposing a block that has previously received a polka on the network, then the proposer does not update the `Header.Timestamp` of that block. +Instead, the proposer simply re-proposes the exact same block. +This way, the proposed block has the exact same block ID as the previously proposed block and the validators that have already received that block do not need to attempt to receive it again. -The proposal timestamp that was decided in height `H-1` will be stored in the `State` struct's in the `RoundState.LastCommit` field. -The proposer will select this timestamp to use as the block timestamp at height `H`. +The proposer will set the re-proposed block's `Header.Timestamp` as the `Proposal` message's `Timestamp`. 
#### Proposer waits @@ -310,72 +194,94 @@ In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millis A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so having a mechanism that completely ignores the Unix time of the validator time no longer works. Validator clocks will not be perfectly in sync. -Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`. -If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`. +Therefore, the proposer’s current known Unix time may be less than the previous block's `Header.Time`. +If the proposer’s current known Unix time is less than the previous block's `Header.Time`, the proposer will sleep until its known Unix time exceeds it. This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method. -This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`. +This method should now schedule a timeout that fires when the proposer’s time is greater than the previous block's `Header.Time`. +When the timeout fires, the proposer will finally issue the `Proposal` message. #### Changes to the propose step timeout Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen. -Proposer-based timestamps requires changing this timeout logic. +Proposer-based timestamps requires changing this timeout logic. -The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block. +The proposer will now wait until its current known Unix time exceeds the previous block's `Header.Time` to propose a block. 
The validators must now take this and some other factors into account when deciding when to timeout the propose step. Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer. Additionally, there may be a delay communicating the proposal message from the proposer to the other validators. -Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out. -To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until `LastCommit.Timesatmp + 2*ACCURACY + MSGDELAY`. +Therefore, validators waiting for a proposal must wait until after the previous block's `Header.Time` before timing out. +To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until the previous block's `Header.Time + 2*ACCURACY + MSGDELAY`. The spec defines this as `waitingTime`. The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`. `enterPropose` will be changed to calculate waiting time using the new consensus parameters. The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013). -### Changes to validation rules +### Changes to proposal validation rules -The rules for validating that a proposal is valid will need slight modification to implement proposer-based timestamps. -Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely` and we will modify the way the block timestamp is validated as well. 
+The rules for validating a proposed block will be modified to implement proposer-based timestamps. +We will change the validation logic to ensure that a proposal is `timely`. -#### Proposal timestamp validation +Per the proposer-based timestamps spec, `timely` only needs to be checked if a block has not received a +2/3 majority of `Prevotes` in a round. +If a block previously received a +2/3 majority of prevotes in a previous round, then +2/3 of the voting power considered the block's timestamp near enough to their own currently known Unix time in that round. -Adding proposal timestamp validation is a reasonably straightforward change. -The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31). -Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field. -The precommit and prevote validation logic does not currently use this timestamp. -This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validators. -If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered it valid. -The validator will also check that the proposal time is greater than the block timestamp from the previous height. +The validation logic will be updated to check `timely` for blocks that did not previously receive +2/3 prevotes in a round. +Receiving +2/3 prevotes in a round is frequently referred to as a 'polka' and we will use this term for simplicity. -If no valid proposal is received by the proposal timeout, the validator will prevote nil. -This is identical to the current logic.
+#### Current timestamp validation logic -#### Block timestamp validation +To provide a better understanding of the changes needed to timestamp validation, we will first detail how timestamp validation works currently in Tendermint. The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118). First, the validation logic checks that this timestamp is greater than the previous block’s timestamp. -Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48). -Finally, the logic also authenticates the timestamps in the `LastCommit`. -The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator’s private key. -One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`. -To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature. + +Second, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48). + +Finally, the validation logic authenticates the timestamps in the `LastCommit.CommitSig`. +The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the voting validator’s private key. +One of the items in this `signedBytes` hash is the timestamp in the `CommitSig`. 
+To authenticate the `CommitSig` timestamp, the validator authenticating votes builds a hash of fields that includes the `CommitSig` timestamp and checks this hash against the signature. This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25). -The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change. +#### Remove unused timestamp validation logic `BFTTime` validation is no longer applicable and will be removed. -Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps. -This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117). +This means that validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps. +Specifically, we will remove the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117). The `MedianTime` function can be completely removed. -The `LastCommit` timestamps may also be removed. -The `signedBytes` validation logic in `VerifyCommit` will be slightly altered. -The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp. -The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`. -The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp. -Specifically, the `VerifyCommit` function will be updated to use this new timestamp. 
+Since `CommitSig`s will no longer contain a timestamp, the validator authenticating a commit will no longer include the `CommitSig` timestamp in the hash of fields it builds to check against the cryptographic signature. + +#### Timestamp validation when a block has not received a polka + +The [POLRound](https://github.com/tendermint/tendermint/blob/68ca65f5d79905abd55ea999536b1a3685f9f19d/types/proposal.go#L29) in the `Proposal` message indicates which round the block received a polka. +A negative value in the `POLRound` field indicates that the block has not previously been proposed on the network. +Therefore the validation logic will check for timely when `POLRound < 0`. + +When a validator receives a `Proposal` message, the validator will check that the `Proposal.Timestamp` is at most `PRECISION` greater than the current Unix time known to the validator, and at minimum `PRECISION + MSGDELAY` less than the current Unix time known to the validator. +If the timestamp is not within these bounds, the proposed block will not be considered `timely`. + +Once a full block matching the `Proposal` message is received, the validator will also check that the timestamp in the `Header.Timestamp` of the block matches this `Proposal.Timestamp`. +Using the `Proposal.Timestamp` to check `timely` allows for the `MSGDELAY` parameter to be more finely tuned since `Proposal` messages do not change sizes and are therefore faster to gossip than full blocks across the network. + +A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height. +If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic. 
+ +#### Timestamp validation when a block has received a polka + +When a block is re-proposed that has already received a +2/3 majority of `Prevote`s on the network, the `Proposal` message for the re-proposed block is created with a `POLRound` that is `>= 0`. +A validator will not check that the `Proposal` is `timely` if the propose message has a non-negative `POLRound`. +If the `POLRound` is non-negative, each validator will simply ensure that it received the `Prevote` messages for the proposed block in the round indicated by `POLRound`. + +If the validator did not receive `Prevote` messages for the proposed block in `POLRound`, then it will prevote nil. +Validators already check that +2/3 prevotes were seen in `POLRound`, so this does not represent a change to the prevote logic. + +A validator will also check that the proposed timestamp is greater than the timestamp of the block for the previous height. +If the timestamp is not greater than the previous block's timestamp, the block will not be considered valid, which is the same as the current logic. + +Additionally, this validation logic can be updated to check that the `Proposal.Timestamp` matches the `Header.Timestamp` of the proposed block, but it is less relevant since checking that votes were received is sufficient to ensure the block timestamp is correct. ### Changes to the prevote step @@ -383,26 +289,14 @@ Currently, a validator will prevote a proposal in one of three cases: * Case 1: Validator has no locked block and receives a valid proposal. * Case 2: Validator has a locked block and receives a valid proposal matching its locked block. -* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the new proposal’s block. 
+* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +⅔ prevotes for the proposal’s block, either in the current round or in a round greater than or equal to the round in which it locked its locked block. The only change we will make to the prevote step is to what a validator considers a valid proposal as detailed above. ### Changes to the precommit step The precommit step will not require much modification. -Its proposal validation rules will change in the same ways that validation will change in the prevote step. - -### Changes to locking a block -When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field. -In each subsequent round it will prevote that block. -A validator will only change which block it has locked if it sees +2/3 prevotes for a different block. - -This mechanism will remain largely unchanged. -The only difference is the addition of proposal timestamp validation. -A validator will prevote nil in a round if the proposal message it received is not `timely`. -Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block. -This difference is an incidental result of the changes to prevote validation. -It is included in this design for completeness and to clarify that no additional changes will be made to block locking. +Its proposal validation rules will change in the same ways that validation will change in the prevote step with the exception of the `timely` check: precommit validation will never check that the timestamp is `timely`. 
### Remove voteTime Completely diff --git a/docs/architecture/adr-072-request-for-comments.md b/docs/architecture/adr-072-request-for-comments.md index 7eb22ebc96..7b656d05e0 100644 --- a/docs/architecture/adr-072-request-for-comments.md +++ b/docs/architecture/adr-072-request-for-comments.md @@ -6,7 +6,7 @@ ## Status -Proposed +Implemented ## Context diff --git a/docs/architecture/adr-073-libp2p.md b/docs/architecture/adr-073-libp2p.md new file mode 100644 index 0000000000..a73443d67b --- /dev/null +++ b/docs/architecture/adr-073-libp2p.md @@ -0,0 +1,235 @@ +# ADR 073: Adopt LibP2P + +## Changelog + +- 2021-11-02: Initial Draft (@tychoish) + +## Status + +Proposed. + +## Context + + +As part of the 0.35 development cycle, the Tendermint team completed +the first phase of the work described in ADRs 61 and 62, which included a +large scale refactoring of the reactors and the p2p message +routing. This replaced the switch and many of the other legacy +components without breaking protocol or network-level +interoperability and left the legacy connection/socket handling code. + +Following the release, the team has reexamined the state of the code +and the design, as well as Tendermint's requirements. The notes +from that process are available in the [P2P Roadmap +RFC][rfc]. + +This ADR supersedes the decisions made in ADRs 60 and 61, but +builds on the completed portions of this work. Previously, the +boundaries of peer management, message handling, and the higher level +business logic (e.g., "the reactors") were intermingled, and core +elements of the p2p system were responsible for the orchestration of +higher-level business logic. Refactoring the legacy components +made it more obvious that this entanglement of responsibilities +had outsized influence on the entire implementation, making +it difficult to iterate within the current abstractions. 
+It would not be viable to maintain interoperability with legacy +systems while also achieving many of our broader objectives. + +LibP2P is a thoroughly-specified implementation of a peer-to-peer +networking stack, designed specifically for systems such as +ours. Adopting LibP2P as the basis of Tendermint will allow the +Tendermint team to focus more of their time on other differentiating +aspects of the system, and make it possible for the ecosystem as a +whole to take advantage of tooling and efforts of the LibP2P +platform. + +## Alternative Approaches + +As discussed in the [P2P Roadmap RFC][rfc], the primary alternative would be to +continue development of Tendermint's home-grown peer-to-peer +layer. While that would give the Tendermint team maximal control +over the peer system, the current design is unexceptional on its +own merits, and the prospective maintenance burden for this system +exceeds our tolerances for the medium term. + +Tendermint can and should differentiate itself not on the basis of +its networking implementation or peer management tools, but providing +a consistent operator experience, a battle-tested consensus algorithm, +and an ergonomic user experience. + +## Decision + +Tendermint will adopt libp2p during the 0.37 development cycle, +replacing the bespoke Tendermint P2P stack. This will remove the +`Endpoint`, `Transport`, `Connection`, and `PeerManager` abstractions +and leave the reactors, `p2p.Router` and `p2p.Channel` +abstractions. + +LibP2P may obviate the need for a dedicated peer exchange (PEX) +reactor, which would also in turn obviate the need for a dedicated +seed mode. If this is the case, then all of this functionality would +be removed. + +If it turns out (based on the advice of Protocol Labs) that it makes +sense to maintain separate pubsub or gossipsub topics +per-message-type, then the `Router` abstraction could also +be entirely subsumed. 
+ +## Detailed Design + +### Implementation Changes + +The seams in the P2P implementation between the higher level +constructs (reactors), the routing layer (`Router`) and the lower +level connection and peer management code make this operation +relatively straightforward to implement. A key +goal in this design is to minimize the impact on the reactors +(potentially entirely,) and completely remove the lower level +components (e.g., `Transport`, `Connection` and `PeerManager`) using the +separation afforded by the `Router` layer. The current state of the +code makes these changes relatively surgical, and limited to a small +number of methods: + +- `p2p.Router.OpenChannel` will still return a `Channel` structure + which will continue to serve as a pipe between the reactors and the + `Router`. The implementation will no longer need the queue + implementation, and will instead start goroutines that + are responsible for routing the messages from the channel to libp2p + fundamentals, replacing the current `p2p.Router.routeChannel`. + +- The current `p2p.Router.dialPeers` and `p2p.Router.acceptPeers`, + are responsible for establishing outbound and inbound connections, + respectively. These methods will be removed, along with + `p2p.Router.openConnection`, and the libp2p connection manager will + be responsible for maintaining network connectivity. + +- The `p2p.Channel` interface will change to replace Go + channels with a more functional interface for sending messages. + New methods on this object will take contexts to support safe + cancellation, and return errors, and will block rather than + running asynchronously. The `Out` channel through which + reactors send messages to Peers, will be replaced by a `Send` + method, and the Error channel will be replaced by an `Error` + method. + +- Reactors will be passed an interface that will allow them to + access Peer information from libp2p. This will supplant the + `p2p.PeerUpdates` subscription. 
+ +- Add some kind of heartbeat message at the application level + (e.g. with a reactor,) potentially connected to libp2p's DHT to be + used by reactors for service discovery, message targeting, or other + features. + +- Replace the existing/legacy handshake protocol with [Noise](http://www.noiseprotocol.org/noise.html). + +This project will initially use the TCP-based transport protocols within +libp2p. QUIC is also available as an option that we may implement later. +We will not support mixed networks in the initial release, but will +revisit that possibility later if there is a demonstrated need. + +### Upgrade and Compatibility + +Because the routers and all current P2P libraries are `internal` +packages and not part of the public API, the only changes to the public +API surface area of Tendermint will be different configuration +file options, replacing the current P2P options with options relevant +to libp2p. + +However, it will not be possible to run a network with both networking +stacks active at once, so the upgrade to the version of Tendermint +will need to be coordinated between all nodes of the network. This is +consistent with the expectations around upgrades for Tendermint moving +forward, and will help manage both the complexity of the project and +the implementation timeline. + +## Open Questions + +- What is the role of Protocol Labs in the implementation of libp2p in + tendermint, both during the initial implementation and on an ongoing + basis thereafter? + +- Should all P2P traffic for a given node be pushed to a single topic, + so that a topic maps to a specific ChainID, or should + each reactor (or type of message) have its own topic? How many + topics can a libp2p network support? Is there testing that validates + the capabilities? + +- Tendermint presently provides a very coarse QoS-like functionality + using priorities based on message-type. 
+ This intuitively/theoretically ensures that evidence and consensus + messages don't get starved by blocksync/statesync messages. It's + unclear if we can or should attempt to replicate this with libp2p. + +- What kind of QoS functionality does libp2p provide and what kind of + metrics does libp2p provide about it's QoS functionality? + +- Is it possible to store additional (and potentially arbitrary) + information into the DHT as part of the heartbeats between nodes, + such as the latest height, and then access that in the + reactors. How frequently can the DHT be updated? + +- Does it make sense to have reactors continue to consume inbound + messages from a Channel (`In`) or is there another interface or + pattern that we should consider? + + - We should avoid exposing Go channels when possible, and likely + some kind of alternate iterator likely makes sense for processing + messages within the reactors. + +- What are the security and protocol implications of tracking + information from peer heartbeats and exposing that to reactors? + +- How much (or how little) configuration can Tendermint provide for + libp2p, particularly on the first release? + + - In general, we should not support byo-functionality for libp2p + components within Tendermint, and reduce the configuration surface + area, as much as possible. + +- What are the best ways to provide request/response semantics for + reactors on top of libp2p? Will it be possible to add + request/response semantics in a future release or is there + anticipatory work that needs to be done as part of the initial + release? + +## Consequences + +### Positive + +- Reduce the maintenance burden for the Tendermint Core team by + removing a large swath of legacy code that has proven to be + difficult to modify safely. + +- Remove the responsibility for maintaining and developing the entire + peer management system (p2p) and stack. 
- By providing users with a more stable peer and networking system,
+ +The configurable values are as follows: + +* `TimeoutPropose` + * How long the consensus algorithm waits for a proposal block before issuing a prevote. + * If no prevote arrives by `TimeoutPropose`, then the consensus algorithm will issue a nil prevote. +* `TimeoutProposeDelta` + * How much the `TimeoutPropose` grows each round. +* `TimeoutPrevote` + * How long the consensus algorithm waits after receiving +2/3 prevotes with + no quorum for a value before issuing a precommit for nil. + (See the [arXiv paper][arxiv-paper], Algorithm 1, Line 34) +* `TimeoutPrevoteDelta` + * How much the `TimeoutPrevote` increases with each round. +* `TimeoutPrecommit` + * How long the consensus algorithm waits after receiving +2/3 precommits that + do not have a quorum for a value before entering the next round. + (See the [arXiv paper][arxiv-paper], Algorithm 1, Line 47) +* `TimeoutPrecommitDelta` + * How much the `TimeoutPrecommit` increases with each round. +* `TimeoutCommit` + * How long the consensus algorithm waits after committing a block but before starting the new height. + * This gives a validator a chance to receive slow precommits. +* `SkipTimeoutCommit` + * Make progress as soon as the node has 100% of the precommits. + + +### Overview of Change + +We will consolidate the timeout parameters and migrate them from the node-local +`config.toml` file into the network-global consensus parameters. + +The 8 timeout parameters will be consolidated down to 6. These will be as follows: + +* `TimeoutPropose` + * Same as current `TimeoutPropose`. +* `TimeoutProposeDelta` + * Same as current `TimeoutProposeDelta`. +* `TimeoutVote` + * How long validators wait for votes in both the prevote + and precommit phase of the consensus algorithm. This parameter subsumes + the current `TimeoutPrevote` and `TimeoutPrecommit` parameters. +* `TimeoutVoteDelta` + * How much the `TimeoutVote` will grow each successive round. 
+ This parameter subsumes the current `TimeoutPrevoteDelta` and `TimeoutPrecommitDelta` + parameters. +* `TimeoutCommit` + * Same as current `TimeoutCommit`. +* `EnableTimeoutCommitBypass` + * Same as current `SkipTimeoutCommit`, renamed for clarity. + +A safe default will be provided by Tendermint for each of these parameters and +networks will be able to update the parameters as they see fit. Local updates +to these parameters will no longer be possible; instead, the application will control +updating the parameters. Applications using the Cosmos SDK will be automatically be +able to change the values of these consensus parameters [via a governance proposal][cosmos-sdk-consensus-params]. + +This change is low-risk. While parameters are locally configurable, many running chains +do not change them from their default values. For example, initializing +a node on Osmosis, Terra, and the Cosmos Hub using the their `init` command produces +a `config.toml` with Tendermint's default values for these parameters. + +### Why this parameter consolidation? + +Reducing the number of parameters is good for UX. Fewer superfluous parameters makes +running and operating a Tendermint network less confusing. + +The Prevote and Precommit messages are both similar sizes, require similar amounts +of processing so there is no strong need for them to be configured separately. + +The `TimeoutPropose` parameter governs how long Tendermint will wait for the proposed +block to be gossiped. Blocks are much larger than votes and therefore tend to be +gossiped much more slowly. It therefore makes sense to keep `TimeoutPropose` and +the `TimeoutProposeDelta` as parameters separate from the vote timeouts. + +`TimeoutCommit` is used by chains to ensure that the network waits for the votes from +slower validators before proceeding to the next height. 
Without this timeout, the votes +from slower validators would consistently not be included in blocks and those validators +would not be counted as 'up' from the chain's perspective. Being down damages a validator's +reputation and causes potential stakers to think twice before delegating to that validator. + +`TimeoutCommit` also prevents the network from producing the next height as soon as validators +on the fastest hardware with a summed voting power of +2/3 of the network's total have +completed execution of the block. Allowing the network to proceed as soon as the fastest ++2/3 completed execution would have a cumulative effect over heights, eventually +leaving slower validators unable to participate in consensus at all. `TimeoutCommit` +therefore allows networks to have greater variability in hardware. Additional +discussion of this can be found in [tendermint issue 5911][tendermint-issue-5911-comment] +and [spec issue 359][spec-issue-359]. + +## Alternative Approaches + +### Hardcode the parameters + +Many Tendermint networks run on similar cloud-hosted infrastructure. Therefore, +they have similar bandwidth and machine resources. The timings for propagating votes +and blocks are likely to be reasonably similar across networks. As a result, the +timeout parameters are good candidates for being hardcoded. Hardcoding the timeouts +in Tendermint would mean entirely removing these parameters from any configuration +that could be altered by either an application or a node operator. Instead, +Tendermint would ship with a set of timeouts and all applications using Tendermint +would use this exact same set of values. + +While Tendermint nodes often run with similar bandwidth and on similar cloud-hosted +machines, there are enough points of variability to make configuring +consensus timeouts meaningful. Namely, Tendermint network topologies are likely to be +very different from chain to chain. 
Additionally, applications may vary greatly in +how long the `Commit` phase may take. Applications that perform more work during `Commit` +require a longer `TimeoutCommit` to allow the application to complete its work +and be prepared for the next height. + +## Decision + +The decision has been made to implement this work, with the caveat that the +specific mechanism for introducing the new parameters to chains is still ongoing. + +## Detailed Design + +### New Consensus Parameters + +A new `TimeoutParams` `message` will be added to the [params.proto file][consensus-params-proto]. +This message will have the following form: + +```proto +message TimeoutParams { + google.protobuf.Duration propose = 1; + google.protobuf.Duration propose_delta = 2; + google.protobuf.Duration vote = 3; + google.protobuf.Duration vote_delta = 4; + google.protobuf.Duration commit = 5; + bool enable_commit_timeout_bypass = 6; +} +``` + +This new message will be added as a field into the [`ConsensusParams` +message][consensus-params-proto]. The same default values that are [currently +set for these parameters][current-timeout-defaults] in the local configuration +file will be used as the defaults for these new consensus parameters in the +[consensus parameter defaults][default-consensus-params]. + +The new consensus parameters will be subject to the same +[validity rules][time-param-validation] as the current configuration values, +namely, each value must be non-negative. + +### Migration + +The new `ConsensusParameters` will be added during an upcoming release. In this +release, the old `config.toml` parameters will cease to control the timeouts and +an error will be logged on nodes that continue to specify these values. The specific +mechanism by which these parameters will added to a chain is being discussed in +[RFC-009][rfc-009] and will be decided ahead of the next release. 
+ +The specific mechanism for adding these parameters depends on work related to +[soft upgrades][soft-upgrades], which is still ongoing. + +## Consequences + +### Positive + +* Timeout parameters will be equal across all of the validators in a Tendermint network. +* Remove superfluous timeout parameters. + +### Negative + +### Neutral + +* Timeout parameters require consensus to change. + +## References + +[conseusus-params-proto]: https://github.com/tendermint/spec/blob/a00de7199f5558cdd6245bbbcd1d8405ccfb8129/proto/tendermint/types/params.proto#L11 +[hashed-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L49 +[default-consensus-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L79 +[current-timeout-defaults]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L955 +[config-toml]: https://github.com/tendermint/tendermint/blob/5cc980698a3402afce76b26693ab54b8f67f038b/config/toml.go#L425-L440 +[cosmos-sdk-consensus-params]: https://github.com/cosmos/cosmos-sdk/issues/6197 +[time-param-validation]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L1038 +[tendermint-issue-5911-comment]: https://github.com/tendermint/tendermint/issues/5911#issuecomment-973560381 +[spec-issue-359]: https://github.com/tendermint/spec/issues/359 +[arxiv-paper]: https://arxiv.org/pdf/1807.04938.pdf +[soft-upgrades]: https://github.com/tendermint/spec/pull/222 +[rfc-009]: https://github.com/tendermint/tendermint/pull/7524 diff --git a/docs/architecture/adr-075-rpc-subscription.md b/docs/architecture/adr-075-rpc-subscription.md new file mode 100644 index 0000000000..c151e7dcb1 --- /dev/null +++ b/docs/architecture/adr-075-rpc-subscription.md @@ -0,0 +1,682 @@ +# ADR 075: RPC Event Subscription Interface + +## Changelog + +- 26-Jan-2022: Marked accepted. 
+- 22-Jan-2022: Updated and expanded (@creachadair). +- 20-Nov-2021: Initial draft (@creachadair). + +--- +## Status + +Accepted + +--- +## Background & Context + +For context, see [RFC 006: Event Subscription][rfc006]. + +The [Tendermint RPC service][rpc-service] permits clients to subscribe to the +event stream generated by a consensus node. This allows clients to observe the +state of the consensus network, including details of the consensus algorithm +state machine, proposals, transaction delivery, and block completion. The +application may also attach custom key-value attributes to events to expose +application-specific details to clients. + +The event subscription API in the RPC service currently comprises three methods: + +1. `subscribe`: A request to subscribe to the events matching a specific + [query expression][query-grammar]. Events can be filtered by their key-value + attributes, including custom attributes provided by the application. + +2. `unsubscribe`: A request to cancel an existing subscription based on its + query expression. + +3. `unsubscribe_all`: A request to cancel all existing subscriptions belonging + to the client. + +There are some important technical and UX issues with the current RPC event +subscription API. The rest of this ADR outlines these problems in detail, and +proposes a new API scheme intended to address them. + +### Issue 1: Persistent connections + +To subscribe to a node's event stream, a client needs a persistent connection +to the node. Unlike the other methods of the service, for which each call is +serviced by a short-lived HTTP round trip, subscription delivers a continuous +stream of events to the client by hijacking the HTTP channel for a websocket. +The stream (and hence the HTTP request) persists until either the subscription +is explicitly cancelled, or the connection is closed. + +There are several problems with this API: + +1. 
**Expensive per-connection state**: The server must maintain a substantial + amount of state per subscriber client: + + - The current implementation uses a [WebSocket][ws] for each active + subscriber. The connection must be maintained even if there are no + matching events for a given client. + + The server can drop idle connections to save resources, but doing so + terminates all subscriptions on those connections and forces those clients + to re-connect, adding additional resource churn for the server. + + - In addition, the server maintains a separate buffer of undelivered events + for each client. This is to reduce the dual risks that a client will miss + events, and that a slow client could "push back" on the publisher, + impeding the progress of consensus. + + Because event traffic is quite bursty, queues can potentially take up a + lot of memory. Moreover, each subscriber may have a different filter + query, so the server winds up having to duplicate the same events among + multiple subscriber queues. Not only does this add memory pressure, but it + does so most at the worst possible time, i.e., when the server is already + under load from high event traffic. + +2. **Operational access control is difficult**: The server's websocket + interface exposes _all_ the RPC service endpoints, not only the subscription + methods. This includes methods that allow callers to inject arbitrary + transactions (`broadcast_tx_*`) and evidence (`broadcast_evidence`) into the + network, remove transactions (`remove_tx`), and request arbitrary amounts of + chain state. + + Filtering requests to the GET endpoint is straightforward: A reverse proxy + like [nginx][nginx] can easily filter methods by URL path. Filtering POST + requests takes a bit more work, but can be managed with a filter program + that speaks [FastCGI][fcgi] and parses JSON-RPC request bodies. + + Filtering the websocket interface requires a dedicated proxy implementation. 
+ Although nginx can [reverse-proxy websockets][rp-ws], it does not support + filtering websocket traffic via FastCGI. The operator would need to either + implement a custom [nginx extension module][ng-xm] or build and run a + standalone proxy that implements websocket and filters each session. Apart + from the work, this also makes the system even more resource intensive, as + well as introducing yet another connection that could potentially time out + or stall on full buffers. + + Even for the simple case of restricting access to only event subscription, + there is no easy solution currently: Once a caller has access to the + websocket endpoint, it has complete access to the RPC service. + +### Issue 2: Inconvenient client API + +The subscription interface has some inconvenient features for the client as +well as the server. These include: + +1. **Non-standard protocol:** The RPC service is mostly [JSON-RPC 2.0][jsonrpc2], + but the subscription interface diverges from the standard. + + In a standard JSON-RPC 2.0 call, the client initiates a request to the + server with a unique ID, and the server concludes the call by sending a + reply for that ID. The `subscribe` implementation, however, sends multiple + responses to the client's request: + + - The client sends `subscribe` with some ID `x` and the desired query + + - The server responds with ID `x` and an empty confirmation response. + + - The server then (repeatedly) sends event result responses with ID `x`, one + for each item with a matching event. + + Standard JSON-RPC clients will reject the subsequent replies, as they + announce a request ID (`x`) that is already complete. This means a caller + has to implement Tendermint-specific handling for these responses. + + Moreover, the result format is different between the initial confirmation + and the subsequent responses. This means a caller has to implement special + logic for decoding the first response versus the subsequent ones. + +2. 
**No way to detect data loss:** The subscriber connection can be terminated + for many reasons. Even ignoring ordinary network issues (e.g., packet loss): + + - The server will drop messages and/or close the websocket if its write + buffer fills, or if the queue of undelivered matching events is not + drained fast enough. The client has no way to discover that messages were + dropped even if the connection remains open. + + - Either the client or the server may close the websocket if the websocket + PING and PONG exchanges are not handled correctly, or frequently enough. + Even if correctly implemented, this may fail if the system is under high + load and cannot service those control messages in a timely manner. + + When the connection is terminated, the server drops all the subscriptions + for that client (as if it had called `unsubscribe_all`). Even if the client + reconnects, any events that were published during the period between the + disconnect and re-connect and re-subscription will be silently lost, and the + client has no way to discover that it missed some relevant messages. + +3. **No way to replay old events:** Even if a client knew it had missed some + events (due to a disconnection, for example), the API provides no way for + the client to "play back" events it may have missed. + +4. **Large response sizes:** Some event data can be quite large, and there can + be substantial duplication across items. The API allows the client to select + _which_ events are reported, but has no way to control which parts of a + matching event it wishes to receive. + + This can be costly on the server (which has to marshal those data into + JSON), the network, and the client (which has to unmarshal the result and + then pick through for the components that are relevant to it). 
+ + Besides being inefficient, this also contributes to some of the persistent + connection issues mentioned above, e.g., filling up the websocket write + buffer and forcing the server to queue potentially several copies of a large + value in memory. + +5. **Client identity is tied to network address:** The Tendermint event API + identifies each subscriber by a (Client ID, Query) pair. In the RPC service, + the query is provided by the client, but the client ID is set to the TCP + address of the client (typically "host:port" or "ip:port"). + + This means that even if the server did _not_ drop subscriptions immediately + when the websocket connection is closed, a client may not be able to + reattach to its existing subscription. Dialing a new connection is likely + to result in a different port (and, depending on their own proxy setup, + possibly a different public IP). + + In isolation, this problem would be easy to work around with a new + subscription parameter, but it would require several other changes to the + handling of event subscriptions for that workaround to become useful. + +--- +## Decision + +To address the described problems, we will: + +1. Introduce a new API for event subscription to the Tendermint RPC service. + The proposed API is described in [Detailed Design](#detailed-design) below. + +2. This new API will target the Tendermint v0.36 release, during which the + current ("streaming") API will remain available as-is, but deprecated. + +3. The streaming API will be entirely removed in release v0.37, which will + require all users of event subscription to switch to the new API. + +> **Point for discussion:** Given that ABCI++ and PBTS are the main priorities +> for v0.36, it would be fine to slip the first phase of this work to v0.37. +> Unless there is a time problem, however, the proposed design does not disrupt +> the work on ABCI++ or PBTS, and will not increase the scope of breaking +> changes. 
Therefore the plan is to begin in v0.36 and slip only if necessary. + +--- +## Detailed Design + +### Design Goals + +Specific goals of this design include: + +1. Remove the need for a persistent connection to each subscription client. + Subscribers should use the same HTTP request flow for event subscription + requests as for other RPC calls. + +2. The server retains minimal state (possibly none) per-subscriber. In + particular: + + - The server does not buffer unconsumed writes nor queue undelivered events + on a per-client basis. + - A client that stalls or goes idle does not cost the server any resources. + - Any event data that is buffered or stored is shared among _all_ + subscribers, and is not duplicated per client. + +3. Slow clients have no impact (or minimal impact) on the rate of progress of + the consensus algorithm, beyond the ambient overhead of servicing individual + RPC requests. + +4. Clients can tell when they have missed events matching their subscription, + within some reasonable (configurable) window of time, and can "replay" + events within that window to catch up. + +5. Nice to have: It should be easy to use the event subscription API from + existing standard tools and libraries, including command-line use for + testing and experimentation. + +### Definitions + +- The **event stream** of a node is a single, time-ordered, heterogeneous + stream of event items. + +- Each **event item** comprises an **event datum** (for example, block header + metadata for a new-block event), and zero or more optional **events**. + +- An **event** means the [ABCI `Event` data type][abci-event], which comprises + a string type and zero or more string key-value **event attributes**. + + The use of the new terms "event item" and "event datum" is to avert confusion + between the values that are published to the event bus (what we call here + "event items") and the ABCI `Event` data type. 
+ +- The node assigns each event item a unique identifier string called a + **cursor**. A cursor must be unique among all events published by a single + node, but it is not required to be unique globally across nodes. + + Cursors are time-ordered so that given event items A and B, if A was + published before B, then cursor(A) < cursor(B) in lexicographic order. + + A minimum viable cursor implementation is a tuple consisting of a timestamp + and a sequence number (e.g., `16CCC798FB5F4670-0123`). However, it may also + be useful to append basic type information to a cursor, to allow efficient + filtering (e.g., `16CCC87E91869050-0091:BeginBlock`). + + The initial implementation will use the minimum viable format. + +### Discussion + +The node maintains an **event log**, a shared ordered record of the events +published to its event bus within an operator-configurable time window. The +initial implementation will store the event log in-memory, and the operator +will be given two per-node configuration settings. Note, these names are +provisional: + +- `[event-subscription] time-window`: A duration before present during which the + node will retain event items published. Setting this value to zero disables + event subscription. + +- `[event-subscription] max-items`: A maximum number of event items that the + node will retain within the time window. If the number of items exceeds this + value, the node discardes the oldest items in the window. Setting this value + to zero means that no limit is imposed on the number of items. + +The node will retain all events within the time window, provided they do not +exceed the maximum number. These config parameters allow the operator to +loosely regulate how much memory and storage the node allocates to the event +log. The client can use the server reply to tell whether the events it wants +are still available from the event log. + +The event log is shared among all subscribers to the node. 
+ +> **Discussion point:** Should events persist across node restarts? +> +> The current event API does not persist events across restarts, so this new +> design does not either. Note, however, that we may "spill" older event data +> to disk as a way of controlling memory use. Such usage is ephemeral, however, +> and does not need to be tracked as node data (e.g., it could be temp files). + +### Query API + +To retrieve event data, the client will call the (new) RPC method `events`. +The parameters of this method will correspond to the following Go types: + +```go +type EventParams struct { + // Optional filter spec. If nil or empty, all items are eligible. + Filter *Filter `json:"filter"` + + // The maximum number of eligible results to return. + // If zero or negative, the server will report a default number. + MaxResults int `json:"max_results"` + + // Return only items after this cursor. If empty, the limit is just + // before the the beginning of the event log. + After string `json:"after_item"` + + // Return only items before this cursor. If empty, the limit is just + // after the head of the event log. + Before string `json:"before_item"` + + // Wait for up to this long for events to be available. + WaitTime time.Duration `json:"wait_time"` +} + +type Filter struct { + Query string `json:"query"` +} +``` + +> **Discussion point:** The initial implementation will not cache filter +> queries for the client. If this turns out to be a performance issue in +> production, the service can keep a small shared cache of compiled queries. +> Given the improvements from #7319 et seq., this should not be necessary. + +> **Discussion point:** For the initial implementation, the new API will use +> the existing query language as-is. Future work may extend the Filter message +> with a more structured and/or expressive query surface, but that is beyond +> the scope of this design. 
+
+The semantics of the request are as follows: An item in the event log is
+**eligible** for a query if:
+
+- It is newer than the `after_item` cursor (if set).
+- It is older than the `before_item` cursor (if set).
+- It matches the filter (if set).
+
+Among the eligible items in the log, the server returns up to `max_results` of
+the newest items, in reverse order of cursor. If `max_results` is unset the
+server chooses a number to return, and will cap `max_results` at a sensible
+limit.
+
+The `wait_time` parameter is used to effect polling. If `before_item` is empty,
+the server will wait for up to `wait_time` for additional items, if there are
+fewer than `max_results` eligible results in the log. If `wait_time` is zero,
+the server will return whatever eligible items are available immediately.
+
+If `before_item` is non-empty, `wait_time` is ignored: new results are only
+added to the head of the log, so there is no need to wait. This allows the
+client to poll for new data, and "page" backward through matching event items.
+This is discussed in more detail below.
+
+The server will set a sensible cap on the maximum `wait_time`, overriding
+client-requested intervals longer than that.
+
+A successful reply from the `events` request corresponds to the following Go
+types:
+
+```go
+type EventReply struct {
+	// The items matching the request parameters, from newest
+	// to oldest, if any were available within the timeout.
+	Items []*EventItem `json:"items"`
+
+	// This is true if there is at least one older matching item
+	// available in the log that was not returned.
+	More bool `json:"more"`
+
+	// The cursor of the oldest item in the log at the time of this reply,
+	// or "" if the log is empty.
+	Oldest string `json:"oldest_item"`
+
+	// The cursor of the newest item in the log at the time of this reply,
+	// or "" if the log is empty.
+	Newest string `json:"newest_item"`
+}
+
+type EventItem struct {
+	// The cursor of this item.
+ Cursor string `json:"cursor"` + + // The encoded event data for this item. + // The type identifies the structure of the value. + Data struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` + } `json:"data"` +} +``` + +The `oldest_item` and `newest_item` fields of the reply report the cursors of +the oldest and newest items (of any kind) recorded in the event log at the time +of the reply, or are `""` if the log is empty. + +The `data` field contains the type-specific event datum. The datum carries any +ABCI events that may have been defined. + +> **Discussion point**: Based on [issue #7273][i7273], I did not include a +> separate field in the response for the ABCI events, since it duplicates data +> already stored elsewhere in the event data. + +The semantics of the reply are as follows: + +- If `items` is non-empty: + + - Items are ordered from newest to oldest. + + - If `more` is true, there is at least one additional, older item in the + event log that was not returned (in excess of `max_results`). + + In this case the client can fetch the next page by setting `before_item` + in a new request, to the cursor of the oldest item fetched (i.e., the + last one in `items`). + + - Otherwise (if `more` is false), all the matching results have been + reported (pagination is complete). + + - The first element of `items` identifies the newest item considered. + Subsequent poll requests can set `after_item` to this cursor to skip + items that were already retrieved. + +- If `items` is empty: + + - If the `before_item` was set in the request, there are no further + eligible items for this query in the log (pagination is complete). + + This is just a safety case; the client can detect this without issuing + another call by consulting the `more` field of the previous reply. + + - If the `before_item` was empty in the request, no eligible items were + available before the `wait_time` expired. The client may poll again to + wait for more event items. 
+
+A client can store cursor values to detect data loss and to recover from
+crashes and connectivity issues:
+
+- After a crash, the client requests events after the newest cursor it has
+  seen. If the reply indicates that cursor is no longer in range, the client
+  may (conservatively) conclude some event data may have been lost.
+
+- On the other hand, if it _is_ in range, the client can then page back through
+  the results that it missed, and then resume polling. As long as its recovery
+  cursor does not age out before it finishes, the client can be sure it has all
+  the relevant results.
+
+### Other Notes
+
+- The new API supports two general "modes" of operation:
+
+  1. In ordinary operation, clients will **long-poll** the head of the event
+     log for new events matching their criteria (by setting a `wait_time` and
+     no `before_item`).
+
+  2. If there are more events than the client requested, or if the client needs
+     to read older events to recover from a stall or crash, clients will
+     **page** backward through the event log (by setting `before_item` and
+     possibly `after_item`).
+
+- While the new API requires explicit polling by the client, it makes better
+  use of the node's existing HTTP infrastructure (e.g., connection pools).
+  Moreover, the direct implementation is easier to use from standard tools and
+  client libraries for HTTP and JSON-RPC.
+
+  Explicit polling does shift the burden of timeliness to the client. That is
+  arguably preferable, however, given that the RPC service is ancillary to the
+  node's primary goal, viz., consensus. The details of polling can be easily
+  hidden from client applications with simple libraries.
+
+- The format of a cursor is considered opaque to the client. Clients must not
+  parse cursor values, but they may rely on their ordering properties.
+
+- To maintain the event log, the server must prune items outside the time
+  window and in excess of the item limit.
+
+  The initial implementation will do this by checking the tail of the event log
+  after each new item is published. If the number of items in the log exceeds
+  the item limit, it will delete oldest items until the log is under the limit;
+  then discard any older than the time window before present.
+
+  To minimize coordination interference between the publisher (the event bus)
+  and the subscribers (the `events` service handlers), the event log will be
+  stored as a persistent linear queue with shared structure (a cons list). A
+  single reader-writer mutex will guard the "head" of the queue where new
+  items are published:
+
+  - **To publish a new item**, the publisher acquires the write lock, conses a
+    new item to the front of the existing queue, and replaces the head pointer
+    with the new item.
+
+  - **To scan the queue**, a reader acquires the read lock, captures the head
+    pointer, and then releases the lock. The rest of its request can be served
+    without holding a lock, since the queue structure will not change.
+
+    When a reader wants to wait, it will yield the lock and wait on a condition
+    that is signaled when the publisher swings the pointer.
+
+  - **To prune the queue**, the publisher (who is the sole writer) will track
+    the queue length and the age of the oldest item separately. When the
+    length and/or age exceed the configured bounds, it will construct a new
+    queue spine on the same items, discarding out-of-band values.
+
+    Pruning can be done while the publisher already holds the write lock, or
+    could be done outside the lock entirely: Once the new queue is constructed,
+    the lock can be re-acquired to swing the pointer. This costs some extra
+    allocations for the cons cells, but avoids duplicating any event items.
+    The pruning step is a simple linear scan down the first (up to) max-items
+    elements of the queue, to find the breakpoint of age and length.
+
+  Moreover, the publisher can amortize the cost of pruning by item count, if
+  necessary, by pruning length "more aggressively" than the configuration
+  requires (e.g., reducing to 3/4 of the maximum rather than 1/1).
+
+  The state of the event log before the publisher acquires the lock:
+  ![Before publish and pruning](./img/adr-075-log-before.png)
+
+  After the publisher has added a new item and pruned old ones:
+  ![After publish and pruning](./img/adr-075-log-after.png)
+
+### Migration Plan
+
+This design requires that clients eventually migrate to the new event
+subscription API, but provides a full release cycle with both APIs in place to
+make this burden more tractable. The migration strategy is broadly:
+
+**Phase 1**: Release v0.36.
+
+- Implement the new `events` endpoint, keeping the existing methods as they are.
+- Update the Go clients to support the new `events` endpoint, and handle polling.
+- Update the old endpoints to log annoyingly about their own deprecation.
+- Write tutorials about how to migrate client usage.
+
+At or shortly after release, we should proactively update the Cosmos SDK to use
+the new API, to remove a disincentive to upgrading.
+
+**Phase 2**: Release v0.37
+
+- During development, we should actively seek out any existing users of the
+  streaming event subscription API and help them migrate.
+- Possibly also: Spend some time writing clients for JS, Rust, et al.
+- Release: Delete the old implementation and all the websocket support code.
+
+> **Discussion point**: Even though the plan is to keep the existing service,
+> we might take the opportunity to restrict the websocket endpoint to _only_
+> the event streaming service, removing the other endpoints. To minimize the
+> disruption for users in the v0.36 cycle, I have decided not to do this for
+> the first phase.
+>
+> If we wind up pushing this design into v0.37, however, we should re-evaluate
+> this partial turn-down of the websocket.
+ +### Future Work + +- This design does not immediately address the problem of allowing the client + to control which data are reported back for event items. That concern is + deferred to future work. However, it would be straightforward to extend the + filter and/or the request parameters to allow more control. + +- The node currently stores a subset of event data (specifically the block and + transaction events) for use in reindexing. While these data are redundant + with the event log described in this document, they are not sufficient to + cover event subscription, as they omit other event types. + + In the future we should investigate consolidating or removing event data from + the state store entirely. For now this issue is out of scope for purposes of + updating the RPC API. We may be able to piggyback on the database unification + plans (see [RFC 001][rfc001]) to store the event log separately, so its + pruning policy does not need to be tied to the block and state stores. + +- This design reuses the existing filter query language from the old API. In + the future we may want to use a more structured and/or expressive query. The + Filter object can be extended with more fields as needed to support this. + +- Some users have trouble communicating with the RPC service because of + configuration problems like improperly-set CORS policies. While this design + does not address those issues directly, we might want to revisit how we set + policies in the RPC service to make it less susceptible to confusing errors + caused by misconfiguration. + +--- +## Consequences + +- ✅ Reduces the number of transport options for RPC. Supports [RFC 002][rfc002]. +- ️✅ Removes the primary non-standard use of JSON-RPC. +- ⛔️ Forces clients to migrate to a different API (eventually). +- ↕️ API requires clients to poll, but this reduces client state on the server. +- ↕️ We have to maintain both implementations for a whole release, but this + gives clients time to migrate. 
+ +--- +## Alternative Approaches + +The following alternative approaches were considered: + +1. **Leave it alone.** Since existing tools mostly already work with the API as + it stands today, we could leave it alone and do our best to improve its + performance and reliability. + + Based on many issues reported by users and node operators (e.g., + [#3380][i3380], [#6439][i6439], [#6729][i6729], [#7247][i7247]), the + problems described here affect even the existing use that works. Investing + further incremental effort in the existing API is unlikely to address these + issues. + +2. **Design a better streaming API.** Instead of polling, we might try to + design a better "streaming" API for event subscription. + + A significant advantage of switching away from streaming is to remove the + need for persistent connections between the node and subscribers. A new + streaming protocol design would lose that advantage, and would still need a + way to let clients recover and replay. + + This approach might look better if we decided to use a different protocol + for event subscription, say gRPC instead of JSON-RPC. That choice, however, + would be just as breaking for existing clients, for marginal benefit. + Moreover, this option increases both the complexity and the resource cost on + the node implementation. + + Given that resource consumption and complexity are important considerations, + this option was not chosen. + +3. **Defer to an external event broker.** We might remove the entire event + subscription infrastructure from the node, and define an optional interface + to allow the node to publish all its events to an external event broker, + such as Apache Kafka. 
+ + This has the advantage of greatly simplifying the node, but at a great cost + to the node operator: To enable event subscription in this design, the + operator has to stand up and maintain a separate process in communion with + the node, and configuration changes would have to be coordinated across + both. + + Moreover, this approach would be highly disruptive to existing client use, + and migration would probably require switching to third-party libraries. + Despite the potential benefits for the node itself, the costs to operators + and clients seems too large for this to be the best option. + + Publishing to an external event broker might be a worthwhile future project, + if there is any demand for it. That decision is out of scope for this design, + as it interacts with the design of the indexer as well. + +--- +## References + +- [RFC 006: Event Subscription][rfc006] +- [Tendermint RPC service][rpc-service] +- [Event query grammar][query-grammar] +- [RFC 6455: The WebSocket protocol][ws] +- [JSON-RPC 2.0 Specification][jsonrpc2] +- [Nginx proxy server][nginx] + - [Proxying websockets][rp-ws] + - [Extension modules][ng-xm] +- [FastCGI][fcgi] +- [RFC 001: Storage Engines & Database Layer][rfc001] +- [RFC 002: Interprocess Communication in Tendermint][rfc002] +- Issues: + - [rpc/client: test that client resubscribes upon disconnect][i3380] (#3380) + - [Too high memory usage when creating many events subscriptions][i6439] (#6439) + - [Tendermint emits events faster than clients can pull them][i6729] (#6729) + - [indexer: unbuffered event subscription slow down the consensus][i7247] (#7247) + - [rpc: remove duplication of events when querying][i7273] (#7273) + +[rfc006]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-006-event-subscription.md +[rpc-service]: https://docs.tendermint.com/master/rpc +[query-grammar]: https://pkg.go.dev/github.com/tendermint/tendermint@master/internal/pubsub/query/syntax +[ws]: 
https://datatracker.ietf.org/doc/html/rfc6455 +[jsonrpc2]: https://www.jsonrpc.org/specification +[nginx]: https://nginx.org/en/docs/ +[fcgi]: http://www.mit.edu/~yandros/doc/specs/fcgi-spec.html +[rp-ws]: https://nginx.org/en/docs/http/websocket.html +[ng-xm]: https://www.nginx.com/resources/wiki/extending/ +[abci-event]: https://pkg.go.dev/github.com/tendermint/tendermint/abci/types#Event +[rfc001]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-storage-engine.rst +[rfc002]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md +[i3380]: https://github.com/tendermint/tendermint/issues/3380 +[i6439]: https://github.com/tendermint/tendermint/issues/6439 +[i6729]: https://github.com/tendermint/tendermint/issues/6729 +[i7247]: https://github.com/tendermint/tendermint/issues/7247 +[i7273]: https://github.com/tendermint/tendermint/issues/7273 diff --git a/docs/architecture/img/adr-075-log-after.png b/docs/architecture/img/adr-075-log-after.png new file mode 100644 index 0000000000..359f205e49 Binary files /dev/null and b/docs/architecture/img/adr-075-log-after.png differ diff --git a/docs/architecture/img/adr-075-log-before.png b/docs/architecture/img/adr-075-log-before.png new file mode 100644 index 0000000000..813b9d257b Binary files /dev/null and b/docs/architecture/img/adr-075-log-before.png differ diff --git a/docs/introduction/architecture.md b/docs/introduction/architecture.md index 3b70e70151..27e1b34c66 100644 --- a/docs/introduction/architecture.md +++ b/docs/introduction/architecture.md @@ -61,7 +61,7 @@ Here are some relevant facts about TCP: ![tcp](../imgs/tcp-window.png) -In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary. 
+In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary. The `mconnection` is represented by a struct, which contains a batch of messages, read and write buffers, and a map of channel IDs to reactors. It communicates with TCP via file descriptors, which it can write to. There is one `mconnection` per peer connection. diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md index baaa9a8128..6589e044aa 100644 --- a/docs/nodes/metrics.md +++ b/docs/nodes/metrics.md @@ -20,6 +20,7 @@ The following metrics are available: | **Name** | **Type** | **Tags** | **Description** | | -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | +| abci_connection_method_timing | Histogram | method, type | Timings for each of the ABCI methods | | consensus_height | Gauge | | Height of the chain | | consensus_validators | Gauge | | Number of validators | | consensus_validators_power | Gauge | | Total voting power of all validators | @@ -55,6 +56,16 @@ The following metrics are available: Percentage of missing + byzantine validators: -```md -((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100 +```prometheus +((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100 +``` + +Rate at which the application is responding to each ABCI method call. +``` +sum(rate(tendermint_abci_connection_method_timing_count[5m])) by (method) +``` + +The 95th percentile response time for the application to the `deliver_tx` ABCI method call. 
+``` +histogram_quantile(0.95, sum by(le) (rate(tendermint_abci_connection_method_timing_bucket{method="deliver_tx"}[5m]))) ``` diff --git a/docs/package-lock.json b/docs/package-lock.json index 1c362b60af..1286cf4385 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -1,12684 +1,151 @@ { "name": "docs", "version": "1.0.0", - "lockfileVersion": 2, + "lockfileVersion": 1, "requires": true, - "packages": { - "": { - "version": "1.0.0", - "license": "ISC", - "dependencies": { - "vuepress-theme-cosmos": "^1.0.182" - }, - "devDependencies": { - "watchpack": "^2.2.0" - } - }, - "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.8.6.tgz", - "integrity": "sha512-Bam7otzjIEgrRXWmk0Amm1+B3ROI5dQnUfJEBjIy0YPM0kMahEoJXCw6160tGKxJLl1g6icoC953nGshQKO7cA==", - "dependencies": { - "@algolia/cache-common": "4.8.6" - } - }, - "node_modules/@algolia/cache-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.8.6.tgz", - "integrity": "sha512-eGQlsXU5G7n4RvV/K6qe6lRAeL6EKAYPT3yZDBjCW4pAh7JWta+77a7BwUQkTqXN1MEQWZXjex3E4z/vFpzNrg==" - }, - "node_modules/@algolia/cache-in-memory": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.8.6.tgz", - "integrity": "sha512-kbJrvCFANxL/l5Pq1NFyHLRphKDwmqcD/OJga0IbNKEulRGDPkt1+pC7/q8d2ikP12adBjLLg2CVias9RJpIaw==", - "dependencies": { - "@algolia/cache-common": "4.8.6" - } - }, - "node_modules/@algolia/client-account": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.8.6.tgz", - "integrity": "sha512-FQVJE/BgCb78jtG7V0r30sMl9P5JKsrsOacGtGF2YebqI0YF25y8Z1nO39lbdjahxUS3QkDw2d0P2EVMj65g2Q==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, 
- "node_modules/@algolia/client-analytics": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.8.6.tgz", - "integrity": "sha512-ZBYFUlzNaWDFtt0rYHI7xbfVX0lPWU9lcEEXI/BlnkRgEkm247H503tNatPQFA1YGkob52EU18sV1eJ+OFRBLA==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.8.6.tgz", - "integrity": "sha512-8dI+K3Nvbes2YRZm2LY7bdCUD05e60BhacrMLxFuKxnBGuNehME1wbxq/QxcG1iNFJlxLIze5TxIcNN3+pn76g==", - "dependencies": { - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-recommendation": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.8.6.tgz", - "integrity": "sha512-Kg8DpjwvaWWujNx6sAUrSL+NTHxFe/UNaliCcSKaMhd3+FiPXN+CrSkO0KWR7I+oK2qGBTG/2Y0BhFOJ5/B/RA==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/client-search": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.8.6.tgz", - "integrity": "sha512-vXLS6umL/9G3bwqc6pkrS9K5/s8coq55mpfRARL+bs0NsToOf77WSTdwzlxv/KdbVF7dHjXgUpBvJ6RyR4ZdAw==", - "dependencies": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/@algolia/logger-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.8.6.tgz", - "integrity": "sha512-FMRxZGdDxSzd0/Mv0R1021FvUt0CcbsQLYeyckvSWX8w+Uk4o0lcV6UtZdERVR5XZsGOqoXLMIYDbR2vkbGbVw==" - }, - "node_modules/@algolia/logger-console": 
{ - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.8.6.tgz", - "integrity": "sha512-TYw9lwUCjvApC6Z0zn36T6gkCl7hbfJmnU+Z/D8pFJ3Yp7lz06S3oWGjbdrULrYP1w1VOhjd0X7/yGNsMhzutQ==", - "dependencies": { - "@algolia/logger-common": "4.8.6" - } - }, - "node_modules/@algolia/requester-browser-xhr": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.8.6.tgz", - "integrity": "sha512-omh6uJ3CJXOmcrU9M3/KfGg8XkUuGJGIMkqEbkFvIebpBJxfs6TVs0ziNeMFAcAfhi8/CGgpLbDSgJtWdGQa6w==", - "dependencies": { - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@algolia/requester-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.8.6.tgz", - "integrity": "sha512-r5xJqq/D9KACkI5DgRbrysVL5DUUagikpciH0k0zjBbm+cXiYfpmdflo/h6JnY6kmvWgjr/4DoeTjKYb/0deAQ==" - }, - "node_modules/@algolia/requester-node-http": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.8.6.tgz", - "integrity": "sha512-TB36OqTVOKyHCOtdxhn/IJyI/NXi/BWy8IEbsiWwwZWlL79NWHbetj49jXWFolEYEuu8PgDjjZGpRhypSuO9XQ==", - "dependencies": { - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@algolia/transporter": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.8.6.tgz", - "integrity": "sha512-NRb31J0TP7EPoVMpXZ4yAtr61d26R8KGaf6qdULknvq5sOVHuuH4PwmF08386ERfIsgnM/OBhl+uzwACdCIjSg==", - "dependencies": { - "@algolia/cache-common": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/requester-common": "4.8.6" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", - "dependencies": { - 
"@babel/highlight": "^7.12.13" - } - }, - "node_modules/@babel/compat-data": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.13.15.tgz", - "integrity": "sha512-ltnibHKR1VnrU4ymHyQ/CXtNXI6yZC0oJThyW78Hft8XndANwi+9H+UIklBDraIjFEJzw8wmcM427oDd9KS5wA==" - }, - "node_modules/@babel/core": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.13.15.tgz", - "integrity": "sha512-6GXmNYeNjS2Uz+uls5jalOemgIhnTMeaXo+yBUA72kC2uX/8VW6XyhVIo2L8/q0goKQA3EVKx0KOQpVKSeWadQ==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-module-transforms": "^7.13.14", - "@babel/helpers": "^7.13.10", - "@babel/parser": "^7.13.15", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.15", - "@babel/types": "^7.13.14", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.1.2", - "semver": "^6.3.0", - "source-map": "^0.5.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/core/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/core/node_modules/json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "json5": "lib/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@babel/core/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/core/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/generator": { - "version": "7.13.9", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.13.9.tgz", - "integrity": "sha512-mHOOmY0Axl/JCTkxTU6Lf5sWOg/v8nUa+Xkt4zMTftX0wqmb6Sh7J8gvcehBw7q0AhrhAR+FDacKjCZ2X8K+Sw==", - "dependencies": { - "@babel/types": "^7.13.0", - "jsesc": "^2.5.1", - "source-map": "^0.5.0" - } - }, - "node_modules/@babel/generator/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz", - "integrity": "sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.12.13.tgz", - "integrity": "sha512-CZOv9tGphhDRlVjVkAgm8Nhklm9RzSmWpX2my+t7Ua/KT616pEzXsQCjinzvkRvHWJ9itO4f296efroX23XCMA==", - "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-compilation-targets": { - "version": "7.13.13", - "resolved": 
"https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.13.tgz", - "integrity": "sha512-q1kcdHNZehBwD9jYPh3WyXcsFERi39X4I59I3NadciWtNDyZ6x+GboOxncFK0kXlKIv6BJm5acncehXWUjWQMQ==", - "dependencies": { - "@babel/compat-data": "^7.13.12", - "@babel/helper-validator-option": "^7.12.17", - "browserslist": "^4.14.5", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.13.11", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz", - "integrity": "sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-member-expression-to-functions": "^7.13.0", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13" - } - }, - "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.17.tgz", - "integrity": "sha512-p2VGmBu9oefLZ2nQpgnEnG0ZlRPvL8gAGvPUMQwUdaE8k49rOMuZpOwdQoy5qJf6K8jL3bcAMhVUlHAjIgJHUg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "regexpu-core": "^4.7.1" - } - }, - "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz", - "integrity": "sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw==", - "dependencies": { - "@babel/helper-compilation-targets": "^7.13.0", - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/traverse": "^7.13.0", - "debug": "^4.1.1", - 
"lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/helper-define-polyfill-provider/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz", - "integrity": "sha512-qS0peLTDP8kOisG1blKbaoBg/o9OSa1qoumMjTK5pM+KDTtpxpsiubnCGP34vK8BXGcb2M9eigwgvoJryrzwWA==", - "dependencies": { - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz", - "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==", - "dependencies": { - "@babel/helper-get-function-arity": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-get-function-arity": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz", - "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.13.0", - 
"resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.0.tgz", - "integrity": "sha512-0kBzvXiIKfsCA0y6cFEIJf4OdzfpRuNk4+YTeHZpGGc666SATFKTz6sRncwFnQk7/ugJ4dSrCj6iJuvW4Qwr2g==", - "dependencies": { - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz", - "integrity": "sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-module-imports": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz", - "integrity": "sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-module-transforms": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.13.14.tgz", - "integrity": "sha512-QuU/OJ0iAOSIatyVZmfqB0lbkVP0kDRiKj34xy+QNsnVZi/PA6BoSoreeqnxxa9EHFAIL0R9XOaAR/G9WlIy5g==", - "dependencies": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-replace-supers": "^7.13.12", - "@babel/helper-simple-access": "^7.13.12", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/helper-validator-identifier": "^7.12.11", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.13", - "@babel/types": "^7.13.14" - } - }, - "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz", - "integrity": 
"sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-plugin-utils": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz", - "integrity": "sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ==" - }, - "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz", - "integrity": "sha512-pUQpFBE9JvC9lrQbpX0TmeNIy5s7GnZjna2lhhcHC7DzgBs6fWn722Y5cfwgrtrqc7NAJwMvOa0mKhq6XaE4jg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-wrap-function": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helper-replace-supers": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.13.12.tgz", - "integrity": "sha512-Gz1eiX+4yDO8mT+heB94aLVNCL+rbuT2xy4YfyNqu8F+OI6vMvJK891qGBTqL9Uc8wxEvRW92Id6G7sDen3fFw==", - "dependencies": { - "@babel/helper-member-expression-to-functions": "^7.13.12", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-simple-access": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz", - "integrity": "sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA==", - "dependencies": { - "@babel/types": "^7.13.12" - } - }, - "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.12.1", - "resolved": 
"https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz", - "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==", - "dependencies": { - "@babel/types": "^7.12.1" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz", - "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==", - "dependencies": { - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==" - }, - "node_modules/@babel/helper-validator-option": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz", - "integrity": "sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw==" - }, - "node_modules/@babel/helper-wrap-function": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz", - "integrity": "sha512-1UX9F7K3BS42fI6qd2A4BjKzgGjToscyZTdp1DjknHLCIvpgne6918io+aL5LXFcER/8QWiwpoY902pVEqgTXA==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/helpers": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.13.10.tgz", - "integrity": 
"sha512-4VO883+MWPDUVRF3PhiLBUFHoX/bsLTGFpFK/HqvvfBZz2D57u9XzPVNFVBTc0PW/CWR9BXTOKt8NF4DInUHcQ==", - "dependencies": { - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" - } - }, - "node_modules/@babel/highlight": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz", - "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "node_modules/@babel/parser": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.13.15.tgz", - "integrity": "sha512-b9COtcAlVEQljy/9fbcMHpG+UIW9ReF+gpaxDHTlZd0c6/UU9ng8zdySAW9sRTzpvcdCHn6bUcbuYUgGzLAWVQ==", - "bin": { - "parser": "bin/babel-parser.js" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz", - "integrity": "sha512-d0u3zWKcoZf379fOeJdr1a5WPDny4aOFZ6hlfKivgK0LY7ZxNfoaHL2fWwdGtHyVvra38FC+HVYkO+byfSA8AQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-proposal-optional-chaining": "^7.13.12" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz", - "integrity": "sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - 
"@babel/helper-remap-async-to-generator": "^7.13.0", - "@babel/plugin-syntax-async-generators": "^7.8.4" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz", - "integrity": "sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-decorators": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.13.15.tgz", - "integrity": "sha512-ibAMAqUm97yzi+LPgdr5Nqb9CMkeieGHvwPg1ywSGjZrZHQEGqE01HmOio8kxRpA/+VtOHouIVy2FMpBbtltjA==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.11", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-decorators": "^7.12.13" - } - }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.13.8.tgz", - "integrity": "sha512-ONWKj0H6+wIRCkZi9zSbZtE/r73uOhMVHh256ys0UzfM7I3d4n+spZNWjOnJv2gzopumP2Wxi186vI8N0Y2JyQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.13.tgz", - "integrity": "sha512-INAgtFo4OnLN3Y/j0VwAgw3HDXcDtX+C/erMvWzuV9v71r7urb6iyMXu7eM9IgLr1ElLlOkaHjJ0SbCmdOQ3Iw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - } - }, - 
"node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.13.8.tgz", - "integrity": "sha512-w4zOPKUFPX1mgvTmL/fcEqy34hrQ1CRcGxdphBc6snDnnqJ47EZDIyop6IwXzAC8G916hsIuXB2ZMBCExC5k7Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-json-strings": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.13.8.tgz", - "integrity": "sha512-aul6znYB4N4HGweImqKn59Su9RS8lbUIqxtXTOcAGtNIDczoEFv+l1EhmX8rUBp3G1jMjKJm8m0jXVp63ZpS4A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - } - }, - "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.13.8.tgz", - "integrity": "sha512-iePlDPBn//UhxExyS9KyeYU7RM9WScAG+D3Hhno0PLJebAEpDZMocbDe64eqynhNAnwz/vZoL/q/QB2T1OH39A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.13.tgz", - "integrity": "sha512-O1jFia9R8BUCl3ZGB7eitaAPu62TXJRHn7rh+ojNERCFyqRwJMTmhz+tJ+k0CwI6CLjX/ee4qW74FSqlq9I35w==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - } - }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.13.8", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz", - "integrity": "sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g==", - "dependencies": { - "@babel/compat-data": "^7.13.8", - "@babel/helper-compilation-targets": "^7.13.8", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.13.8.tgz", - "integrity": "sha512-0wS/4DUF1CuTmGo+NiaHfHcVSeSLj5S3e6RivPTg/2k3wOv3jO35tZ6/ZWsQhQMvdgI7CwphjQa/ccarLymHVA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.13.12.tgz", - "integrity": "sha512-fcEdKOkIB7Tf4IxrgEVeFC4zeJSTr78no9wTdBuZZbqF64kzllU0ybo2zrzm7gUQfxGhBgq4E39oRs8Zx/RMYQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - } - }, - "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.13.0.tgz", - "integrity": "sha512-MXyyKQd9inhx1kDYPkFRVOBXQ20ES8Pto3T7UZ92xj2mY0EVD8oAVzeyYuVfy/mxAdTSIayOvg+aVzcHV2bn6Q==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - 
"node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz", - "integrity": "sha512-XyJmZidNfofEkqFV5VC/bLabGmO5QzenPO/YOfGuEbgU+2sSwMmio3YLb4WtBgcmmdwZHyVyv8on77IUjQ5Gvg==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-decorators": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz", - "integrity": "sha512-Rw6aIXGuqDLr6/LoBBYE57nKOzQpz/aDkKlMqEwH+Vp0MXbG6H/TfRjaY343LKxzAKAMXIHsQ8JzaZKuDZ9MwA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - 
"dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" - } - }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz", - "integrity": "sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - 
"dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz", - "integrity": "sha512-A81F9pDwyS7yM//KwbCSDqy3Uj4NMIurtplxphWxoYtNPov7cJsDkAFNNyVlIZ3jwGycVsurZ+LtOA8gZ376iQ==", - "dependencies": { - 
"@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz", - "integrity": "sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.13.0.tgz", - "integrity": "sha512-3j6E004Dx0K3eGmhxVJxwwI89CTJrce7lg3UrtFuDAVQ/2+SJ/h/aSFOeE6/n0WB1GsOffsJp6MnPQNQ8nmwhg==", - "dependencies": { - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz", - "integrity": "sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.13.tgz", - "integrity": "sha512-Pxwe0iqWJX4fOOM2kEZeUuAxHMWb9nK+9oh5d11bsLoB0xMg+mkDpt0eYuDZB7ETrY9bbcVlKUGTOGWy7BHsMQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz", - "integrity": 
"sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13", - "globals": "^11.1.0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz", - "integrity": "sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.0.tgz", - "integrity": "sha512-zym5em7tePoNT9s964c0/KU3JPPnuq7VhIxPRefJ4/s82cD+q1mgKfuGRDMCPL0HTyKz4dISuQlCusfgCJ86HA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.13.tgz", - "integrity": "sha512-foDrozE65ZFdUC2OfgeOCrEPTxdB3yjqxpXh8CH+ipd9CHd4s/iq81kcUpyH8ACGNEPdFqbtzfgzbT/ZGlbDeQ==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.13.tgz", - "integrity": 
"sha512-NfADJiiHdhLBW3pulJlJI2NB0t4cci4WTZ8FtdIuNc2+8pslXdPtRRAEWqUY+m9kNOk2eRYbTAOipAxlrOcwwQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.13.tgz", - "integrity": "sha512-fbUelkM1apvqez/yYx1/oICVnGo2KM5s63mhGylrmXUxK/IAXSIf87QIxVfZldWf4QsOafY6vV3bX8aMHSvNrA==", - "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-for-of": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz", - "integrity": "sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz", - "integrity": "sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ==", - "dependencies": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz", - "integrity": "sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.12.13", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz", - "integrity": "sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz", - "integrity": "sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz", - "integrity": "sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-simple-access": "^7.12.13", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.13.8.tgz", - "integrity": "sha512-hwqctPYjhM6cWvVIlOIe27jCIBgHCsdH2xCJVAYQm7V5yTMoilbVMi9f6wKg0rpQAOn6ZG4AOyvCqFF/hUh6+A==", - "dependencies": { - "@babel/helper-hoist-variables": "^7.13.0", - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-identifier": "^7.12.11", - "babel-plugin-dynamic-import-node": "^2.3.3" - } - }, - "node_modules/@babel/plugin-transform-modules-umd": { 
- "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz", - "integrity": "sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw==", - "dependencies": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.13.tgz", - "integrity": "sha512-Xsm8P2hr5hAxyYblrfACXpQKdQbx4m2df9/ZZSQ8MAhsadw06+jW7s9zsSw6he+mJZXRlVMyEnVktJo4zjk1WA==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-new-target": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.13.tgz", - "integrity": "sha512-/KY2hbLxrG5GTQ9zzZSc3xWiOy379pIETEhbtzwZcw9rvuaVV4Fqy7BYGYOWZnaoXIQYbbJ0ziXLa/sKcGCYEQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz", - "integrity": "sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/helper-replace-supers": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz", - "integrity": "sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw==", - "dependencies": { - 
"@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz", - "integrity": "sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz", - "integrity": "sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ==", - "dependencies": { - "regenerator-transform": "^0.14.2" - } - }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.13.tgz", - "integrity": "sha512-xhUPzDXxZN1QfiOy/I5tyye+TRz6lA7z6xaT4CLOjPRMVg1ldRf0LHw0TDBpYL4vG78556WuHdyO9oi5UmzZBg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.15.tgz", - "integrity": "sha512-d+ezl76gx6Jal08XngJUkXM4lFXK/5Ikl9Mh4HKDxSfGJXmZ9xG64XT2oivBzfxb/eQ62VfvoMkaCZUKJMVrBA==", - "dependencies": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.12.13", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz", - "integrity": "sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz", - "integrity": "sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1" - } - }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.13.tgz", - "integrity": "sha512-Jc3JSaaWT8+fr7GRvQP02fKDsYk4K/lYwWq38r/UGfaxo89ajud321NH28KRQ7xy1Ybc0VUE5Pz8psjNNDUglg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz", - "integrity": "sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.13.0" - } - }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.13.tgz", - "integrity": "sha512-eKv/LmUJpMnu4npgfvs3LiHhJua5fo/CysENxa45YCQXZwKnGCQKAg87bvoqSW1fFT+HA32l03Qxsm8ouTY3ZQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-unicode-escapes": 
{ - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.13.tgz", - "integrity": "sha512-0bHEkdwJ/sN/ikBHfSmOXPypN/beiGqjo+o4/5K+vxEFNPRPdImhviPakMKG4x96l85emoa0Z6cDflsdBusZbw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.13.tgz", - "integrity": "sha512-mDRzSNY7/zopwisPZ5kM9XKCfhchqIYwAKRERtEnhYscZB79VRekuRSoYbN0+KVe3y8+q1h6A4svXtP7N+UoCA==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "node_modules/@babel/preset-env": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.13.15.tgz", - "integrity": "sha512-D4JAPMXcxk69PKe81jRJ21/fP/uYdcTZ3hJDF5QX2HSI9bBxxYw/dumdR6dGumhjxlprHPE4XWoPaqzZUVy2MA==", - "dependencies": { - "@babel/compat-data": "^7.13.15", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-option": "^7.12.17", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-async-generator-functions": "^7.13.15", - "@babel/plugin-proposal-class-properties": "^7.13.0", - "@babel/plugin-proposal-dynamic-import": "^7.13.8", - "@babel/plugin-proposal-export-namespace-from": "^7.12.13", - "@babel/plugin-proposal-json-strings": "^7.13.8", - "@babel/plugin-proposal-logical-assignment-operators": "^7.13.8", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8", - "@babel/plugin-proposal-numeric-separator": "^7.12.13", - "@babel/plugin-proposal-object-rest-spread": "^7.13.8", - "@babel/plugin-proposal-optional-catch-binding": "^7.13.8", - "@babel/plugin-proposal-optional-chaining": 
"^7.13.12", - "@babel/plugin-proposal-private-methods": "^7.13.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.12.13", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.12.13", - "@babel/plugin-transform-arrow-functions": "^7.13.0", - "@babel/plugin-transform-async-to-generator": "^7.13.0", - "@babel/plugin-transform-block-scoped-functions": "^7.12.13", - "@babel/plugin-transform-block-scoping": "^7.12.13", - "@babel/plugin-transform-classes": "^7.13.0", - "@babel/plugin-transform-computed-properties": "^7.13.0", - "@babel/plugin-transform-destructuring": "^7.13.0", - "@babel/plugin-transform-dotall-regex": "^7.12.13", - "@babel/plugin-transform-duplicate-keys": "^7.12.13", - "@babel/plugin-transform-exponentiation-operator": "^7.12.13", - "@babel/plugin-transform-for-of": "^7.13.0", - "@babel/plugin-transform-function-name": "^7.12.13", - "@babel/plugin-transform-literals": "^7.12.13", - "@babel/plugin-transform-member-expression-literals": "^7.12.13", - "@babel/plugin-transform-modules-amd": "^7.13.0", - "@babel/plugin-transform-modules-commonjs": "^7.13.8", - "@babel/plugin-transform-modules-systemjs": "^7.13.8", - "@babel/plugin-transform-modules-umd": "^7.13.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.13", - "@babel/plugin-transform-new-target": "^7.12.13", - "@babel/plugin-transform-object-super": "^7.12.13", - 
"@babel/plugin-transform-parameters": "^7.13.0", - "@babel/plugin-transform-property-literals": "^7.12.13", - "@babel/plugin-transform-regenerator": "^7.13.15", - "@babel/plugin-transform-reserved-words": "^7.12.13", - "@babel/plugin-transform-shorthand-properties": "^7.12.13", - "@babel/plugin-transform-spread": "^7.13.0", - "@babel/plugin-transform-sticky-regex": "^7.12.13", - "@babel/plugin-transform-template-literals": "^7.13.0", - "@babel/plugin-transform-typeof-symbol": "^7.12.13", - "@babel/plugin-transform-unicode-escapes": "^7.12.13", - "@babel/plugin-transform-unicode-regex": "^7.12.13", - "@babel/preset-modules": "^0.1.4", - "@babel/types": "^7.13.14", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "core-js-compat": "^3.9.0", - "semver": "^6.3.0" - } - }, - "node_modules/@babel/preset-modules": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz", - "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - } - }, - "node_modules/@babel/runtime": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.10.tgz", - "integrity": "sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw==", - "dependencies": { - "regenerator-runtime": "^0.13.4" - } - }, - "node_modules/@babel/template": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz", - "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==", - "dependencies": { - "@babel/code-frame": 
"^7.12.13", - "@babel/parser": "^7.12.13", - "@babel/types": "^7.12.13" - } - }, - "node_modules/@babel/traverse": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.13.15.tgz", - "integrity": "sha512-/mpZMNvj6bce59Qzl09fHEs8Bt8NnpEDQYleHUPZQ3wXUMvXi+HJPLars68oAbmp839fGoOkv2pSL2z9ajCIaQ==", - "dependencies": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/parser": "^7.13.15", - "@babel/types": "^7.13.14", - "debug": "^4.1.0", - "globals": "^11.1.0" - } - }, - "node_modules/@babel/traverse/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/@babel/traverse/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/@babel/types": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.13.14.tgz", - "integrity": "sha512-A2aa3QTkWoyqsZZFl56MLUsfmh7O0gN41IPvXAE/++8ojpbz12SszD7JEGYVdn4f9Kt4amIei07swF1h4AqmmQ==", - "dependencies": { - "@babel/helper-validator-identifier": "^7.12.11", - "lodash": "^4.17.19", - "to-fast-properties": "^2.0.0" - } - }, - "node_modules/@cosmos-ui/vue": { - "version": "0.35.0", - "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.35.0.tgz", - "integrity": "sha512-WTCJBWSoiDckgvXWPByKkQ7ZVSf9LSMsizIAHBnsi0Zp3GOaEqPNBpgjGt2JEhpDPr7+YwyIgmqQ0S3D+Hq5iQ==", - "dependencies": { - "algoliasearch": "^4.1.0", - "axios": "^0.19.2", - "clipboard-copy": "^3.1.0", - "fuse.js": "^3.4.6", - 
"hotkeys-js": "^3.7.3", - "js-base64": "^2.5.2", - "lodash": "^4.17.15", - "markdown-it": "^10.0.0", - "prismjs": "^1.19.0", - "querystring": "^0.2.0", - "tiny-cookie": "^2.3.1", - "vue": "^2.6.10" - } - }, - "node_modules/@cosmos-ui/vue/node_modules/axios": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz", - "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", - "dependencies": { - "follow-redirects": "1.5.10" - } - }, - "node_modules/@cosmos-ui/vue/node_modules/entities": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", - "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" - }, - "node_modules/@cosmos-ui/vue/node_modules/markdown-it": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz", - "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==", - "dependencies": { - "argparse": "^1.0.7", - "entities": "~2.0.0", - "linkify-it": "^2.0.0", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/@mrmlnc/readdir-enhanced": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz", - "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==", - "dependencies": { - "call-me-maybe": "^1.0.1", - "glob-to-regexp": "^0.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@nodelib/fs.stat": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz", - "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==", - "engines": { - "node": ">= 6" - } - }, - 
"node_modules/@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@types/glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==", - "dependencies": { - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "node_modules/@types/json-schema": { - "version": "7.0.7", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.7.tgz", - "integrity": "sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA==" - }, - "node_modules/@types/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA==" - }, - "node_modules/@types/node": { - "version": "14.14.37", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.37.tgz", - "integrity": "sha512-XYmBiy+ohOR4Lh5jE379fV2IU+6Jn4g5qASinhitfyO71b/sCo6MKsMLF5tc7Zf2CE8hViVQyYSobJNke8OvUw==" - }, - "node_modules/@types/q": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", - "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" - }, - 
"node_modules/@vue/babel-helper-vue-jsx-merge-props": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz", - "integrity": "sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA==" - }, - "node_modules/@vue/babel-helper-vue-transform-on": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.0.2.tgz", - "integrity": "sha512-hz4R8tS5jMn8lDq6iD+yWL6XNB699pGIVLk7WSJnn1dbpjaazsjZQkieJoRX6gW5zpYSCFqQ7jUquPNY65tQYA==" - }, - "node_modules/@vue/babel-plugin-jsx": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.4.tgz", - "integrity": "sha512-Vu5gsabUdsiWc4vQarg46xWJGs8pMEJyyMQAKA1vO+F4+aR4/jaxWxPCOvZ7XvVyy+ecSbwQp/qIyDVje360UQ==", - "dependencies": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.0.0", - "@babel/template": "^7.0.0", - "@babel/traverse": "^7.0.0", - "@babel/types": "^7.0.0", - "@vue/babel-helper-vue-transform-on": "^1.0.2", - "camelcase": "^6.0.0", - "html-tags": "^3.1.0", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-plugin-transform-vue-jsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz", - "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==", - "dependencies": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "html-tags": "^2.0.0", - "lodash.kebabcase": "^4.1.1", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-plugin-transform-vue-jsx/node_modules/html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": 
"sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=", - "engines": { - "node": ">=4" - } - }, - "node_modules/@vue/babel-preset-app": { - "version": "4.5.12", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.12.tgz", - "integrity": "sha512-8q67ORQ9O0Ms0nlqsXTVhaBefRBaLrzPxOewAZhdcO7onHwcO5/wRdWtHhZgfpCZlhY7NogkU16z3WnorSSkEA==", - "dependencies": { - "@babel/core": "^7.11.0", - "@babel/helper-compilation-targets": "^7.9.6", - "@babel/helper-module-imports": "^7.8.3", - "@babel/plugin-proposal-class-properties": "^7.8.3", - "@babel/plugin-proposal-decorators": "^7.8.3", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-jsx": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.11.0", - "@babel/preset-env": "^7.11.0", - "@babel/runtime": "^7.11.0", - "@vue/babel-plugin-jsx": "^1.0.3", - "@vue/babel-preset-jsx": "^1.2.4", - "babel-plugin-dynamic-import-node": "^2.3.3", - "core-js": "^3.6.5", - "core-js-compat": "^3.6.5", - "semver": "^6.1.0" - } - }, - "node_modules/@vue/babel-preset-jsx": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz", - "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==", - "dependencies": { - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "@vue/babel-sugar-composition-api-inject-h": "^1.2.1", - "@vue/babel-sugar-composition-api-render-instance": "^1.2.4", - "@vue/babel-sugar-functional-vue": "^1.2.2", - "@vue/babel-sugar-inject-h": "^1.2.2", - "@vue/babel-sugar-v-model": "^1.2.3", - "@vue/babel-sugar-v-on": "^1.2.3" - } - }, - "node_modules/@vue/babel-sugar-composition-api-inject-h": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz", - "integrity": 
"sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-composition-api-render-instance": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz", - "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-functional-vue": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz", - "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-inject-h": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz", - "integrity": "sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0" - } - }, - "node_modules/@vue/babel-sugar-v-model": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz", - "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "camelcase": "^5.0.0", - "html-tags": "^2.0.0", - "svg-tags": "^1.0.0" - } - }, - "node_modules/@vue/babel-sugar-v-model/node_modules/camelcase": { - "version": "5.3.1", - "resolved": 
"https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@vue/babel-sugar-v-model/node_modules/html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=", - "engines": { - "node": ">=4" - } - }, - "node_modules/@vue/babel-sugar-v-on": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz", - "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==", - "dependencies": { - "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", - "camelcase": "^5.0.0" - } - }, - "node_modules/@vue/babel-sugar-v-on/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/@vue/component-compiler-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz", - "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==", - "dependencies": { - "consolidate": "^0.15.1", - "hash-sum": "^1.0.2", - "lru-cache": "^4.1.2", - "merge-source-map": "^1.1.0", - "postcss": "^7.0.14", - "postcss-selector-parser": "^6.0.2", - "prettier": "^1.18.2", - "source-map": "~0.6.1", - "vue-template-es2015-compiler": "^1.9.0" - }, - "optionalDependencies": { - "prettier": "^1.18.2" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/lru-cache": { - "version": "4.1.5", - "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "dependencies": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@vue/component-compiler-utils/node_modules/yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=" - }, - "node_modules/@vuepress/core": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.8.2.tgz", - "integrity": "sha512-lh9BLC06k9s0wxTuWtCkiNj49fkbW87enp0XSrFZHEoyDGSGndQjZmMMErcHc5Hx7nrW1nzc33sPH1NNtJl0hw==", - "dependencies": { - "@babel/core": "^7.8.4", - "@vue/babel-preset-app": "^4.1.2", - "@vuepress/markdown": "1.8.2", - "@vuepress/markdown-loader": "1.8.2", - "@vuepress/plugin-last-updated": "1.8.2", - "@vuepress/plugin-register-components": "1.8.2", - "@vuepress/shared-utils": "1.8.2", - "autoprefixer": "^9.5.1", - "babel-loader": "^8.0.4", - "cache-loader": "^3.0.0", - "chokidar": "^2.0.3", - "connect-history-api-fallback": "^1.5.0", - "copy-webpack-plugin": "^5.0.2", - "core-js": "^3.6.4", - "cross-spawn": "^6.0.5", - "css-loader": "^2.1.1", - "file-loader": "^3.0.1", - "js-yaml": "^3.13.1", - "lru-cache": "^5.1.1", - "mini-css-extract-plugin": "0.6.0", - "optimize-css-assets-webpack-plugin": "^5.0.1", - "portfinder": "^1.0.13", - "postcss-loader": "^3.0.0", - "postcss-safe-parser": "^4.0.1", - "toml": "^3.0.0", - "url-loader": "^1.0.1", - "vue": "^2.6.10", - "vue-loader": "^15.7.1", - "vue-router": "^3.4.5", - 
"vue-server-renderer": "^2.6.10", - "vue-template-compiler": "^2.6.10", - "vuepress-html-webpack-plugin": "^3.2.0", - "vuepress-plugin-container": "^2.0.2", - "webpack": "^4.8.1", - "webpack-chain": "^6.0.0", - "webpack-dev-server": "^3.5.1", - "webpack-merge": "^4.1.2", - "webpackbar": "3.2.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/@vuepress/markdown": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.8.2.tgz", - "integrity": "sha512-zznBHVqW+iBkznF/BO/GY9RFu53khyl0Ey0PnGqvwCJpRLNan6y5EXgYumtjw2GSYn5nDTTALYxtyNBdz64PKg==", - "dependencies": { - "@vuepress/shared-utils": "1.8.2", - "markdown-it": "^8.4.1", - "markdown-it-anchor": "^5.0.2", - "markdown-it-chain": "^1.3.0", - "markdown-it-emoji": "^1.4.0", - "markdown-it-table-of-contents": "^0.4.0", - "prismjs": "^1.13.0" - } - }, - "node_modules/@vuepress/markdown-loader": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.8.2.tgz", - "integrity": "sha512-mWzFXikCUcAN/chpKkqZpRYKdo0312hMv8cBea2hvrJYV6y4ODB066XKvXN8JwOcxuCjxWYJkhWGr+pXq1oTtw==", - "dependencies": { - "@vuepress/markdown": "1.8.2", - "loader-utils": "^1.1.0", - "lru-cache": "^5.1.1" - } - }, - "node_modules/@vuepress/markdown/node_modules/entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "node_modules/@vuepress/markdown/node_modules/markdown-it": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz", - "integrity": "sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==", - "dependencies": { - "argparse": "^1.0.7", - "entities": "~1.1.1", - "linkify-it": "^2.0.0", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": 
"bin/markdown-it.js" - } - }, - "node_modules/@vuepress/plugin-active-header-links": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.8.2.tgz", - "integrity": "sha512-JmXAQg8D7J8mcKe2Ue3BZ9dOCzJMJXP4Cnkkc/IrqfDg0ET0l96gYWZohCqlvRIWt4f0VPiFAO4FLYrW+hko+g==", - "dependencies": { - "lodash.debounce": "^4.0.8" - } - }, - "node_modules/@vuepress/plugin-google-analytics": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz", - "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ==" - }, - "node_modules/@vuepress/plugin-last-updated": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.8.2.tgz", - "integrity": "sha512-pYIRZi52huO9b6HY3JQNPKNERCLzMHejjBRt9ekdnJ1xhLs4MmRvt37BoXjI/qzvXkYtr7nmGgnKThNBVRTZuA==", - "dependencies": { - "cross-spawn": "^6.0.5" - } - }, - "node_modules/@vuepress/plugin-nprogress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.8.2.tgz", - "integrity": "sha512-3TOBee2NM3WLr1tdjDTGfrAMggjN+OlEPyKyv8FqThsVkDYhw48O3HwqlThp9KX7UbL3ExxIFBwWRFLC+kYrdw==", - "dependencies": { - "nprogress": "^0.2.0" - } - }, - "node_modules/@vuepress/plugin-register-components": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.8.2.tgz", - "integrity": "sha512-6SUq3nHFMEh9qKFnjA8QnrNxj0kLs7+Gspq1OBU8vtu0NQmSvLFZVaMV7pzT/9zN2nO5Pld5qhsUJv1g71MrEA==", - "dependencies": { - "@vuepress/shared-utils": "1.8.2" - } - }, - "node_modules/@vuepress/plugin-search": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.8.2.tgz", - "integrity": 
"sha512-JrSJr9o0Kar14lVtZ4wfw39pplxvvMh8vDBD9oW09a+6Zi/4bySPGdcdaqdqGW+OHSiZNvG+6uyfKSBBBqF6PA==" - }, - "node_modules/@vuepress/shared-utils": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.8.2.tgz", - "integrity": "sha512-6kGubc7iBDWruEBUU7yR+sQ++SOhMuvKWvWeTZJKRZedthycdzYz7QVpua0FaZSAJm5/dIt8ymU4WQvxTtZgTQ==", - "dependencies": { - "chalk": "^2.3.2", - "escape-html": "^1.0.3", - "fs-extra": "^7.0.1", - "globby": "^9.2.0", - "gray-matter": "^4.0.1", - "hash-sum": "^1.0.2", - "semver": "^6.0.0", - "toml": "^3.0.0", - "upath": "^1.1.0" - } - }, - "node_modules/@vuepress/theme-default": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.8.2.tgz", - "integrity": "sha512-rE7M1rs3n2xp4a/GrweO8EGwqFn3EA5gnFWdVmVIHyr7C1nix+EqjpPQF1SVWNnIrDdQuCw38PqS+oND1K2vYw==", - "dependencies": { - "@vuepress/plugin-active-header-links": "1.8.2", - "@vuepress/plugin-nprogress": "1.8.2", - "@vuepress/plugin-search": "1.8.2", - "docsearch.js": "^2.5.2", - "lodash": "^4.17.15", - "stylus": "^0.54.8", - "stylus-loader": "^3.0.2", - "vuepress-plugin-container": "^2.0.2", - "vuepress-plugin-smooth-scroll": "^0.0.3" - } - }, - "node_modules/@webassemblyjs/ast": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz", - "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", - "dependencies": { - "@webassemblyjs/helper-module-context": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/wast-parser": "1.9.0" - } - }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz", - "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA==" - }, - 
"node_modules/@webassemblyjs/helper-api-error": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz", - "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==" - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz", - "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA==" - }, - "node_modules/@webassemblyjs/helper-code-frame": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz", - "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==", - "dependencies": { - "@webassemblyjs/wast-printer": "1.9.0" - } - }, - "node_modules/@webassemblyjs/helper-fsm": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz", - "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw==" - }, - "node_modules/@webassemblyjs/helper-module-context": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz", - "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0" - } - }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", - "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==" - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.9.0", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz", - "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0" - } - }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz", - "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==", - "dependencies": { - "@xtuc/ieee754": "^1.2.0" - } - }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz", - "integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==", - "dependencies": { - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz", - "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w==" - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz", - "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/helper-wasm-section": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0", - "@webassemblyjs/wasm-opt": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0", - "@webassemblyjs/wast-printer": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": 
"1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz", - "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/ieee754": "1.9.0", - "@webassemblyjs/leb128": "1.9.0", - "@webassemblyjs/utf8": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz", - "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-buffer": "1.9.0", - "@webassemblyjs/wasm-gen": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz", - "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-api-error": "1.9.0", - "@webassemblyjs/helper-wasm-bytecode": "1.9.0", - "@webassemblyjs/ieee754": "1.9.0", - "@webassemblyjs/leb128": "1.9.0", - "@webassemblyjs/utf8": "1.9.0" - } - }, - "node_modules/@webassemblyjs/wast-parser": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz", - "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/floating-point-hex-parser": "1.9.0", - "@webassemblyjs/helper-api-error": "1.9.0", - "@webassemblyjs/helper-code-frame": "1.9.0", - "@webassemblyjs/helper-fsm": "1.9.0", - "@xtuc/long": "4.2.2" - } - }, - 
"node_modules/@webassemblyjs/wast-printer": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz", - "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/wast-parser": "1.9.0", - "@xtuc/long": "4.2.2" - } - }, - "node_modules/@xtuc/ieee754": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" - }, - "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" - }, - "node_modules/accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "dependencies": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "7.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", - "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/agentkeepalive": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz", - "integrity": 
"sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=", - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "node_modules/ajv-errors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz", - "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==" - }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" - }, - "node_modules/algoliasearch": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.8.6.tgz", - "integrity": "sha512-G8IA3lcgaQB4r9HuQ4G+uSFjjz0Wv2OgEPiQ8emA+G2UUlroOfMl064j1bq/G+QTW0LmTQp9JwrFDRWxFM9J7w==", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.8.6", - "@algolia/cache-common": "4.8.6", - "@algolia/cache-in-memory": "4.8.6", - "@algolia/client-account": "4.8.6", - "@algolia/client-analytics": "4.8.6", - "@algolia/client-common": "4.8.6", - "@algolia/client-recommendation": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/logger-console": "4.8.6", - "@algolia/requester-browser-xhr": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/requester-node-http": "4.8.6", - "@algolia/transporter": "4.8.6" - } - }, - "node_modules/alphanum-sort": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz", - "integrity": 
"sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=" - }, - "node_modules/ansi-align": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz", - "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==", - "dependencies": { - "string-width": "^3.0.0" - } - }, - "node_modules/ansi-colors": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.4.tgz", - "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dependencies": { - "type-fest": "^0.21.3" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-html": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ansi-html/-/ansi-html-0.0.7.tgz", - "integrity": "sha1-gTWEAhliqenm/QOflA0S9WynhZ4=", - "engines": [ - "node >= 0.8.0" - ], - "bin": { - "ansi-html": "bin/ansi-html" - } - }, - "node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/anymatch": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": 
"sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", - "dependencies": { - "micromatch": "^3.1.4", - "normalize-path": "^2.1.1" - } - }, - "node_modules/anymatch/node_modules/normalize-path": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", - "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", - "dependencies": { - "remove-trailing-separator": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/aproba": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==" - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/arr-diff": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", - "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arr-flatten": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/arr-union": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", - "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": 
"sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==" - }, - "node_modules/array-union": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", - "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=", - "dependencies": { - "array-uniq": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-uniq": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", - "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/array-unique": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", - "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/asap": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", - "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" - }, - "node_modules/asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/asn1.js": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", - "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", - "dependencies": { - "bn.js": "^4.0.0", - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/asn1.js/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/assert": { - "version": "1.5.0", - "resolved": 
"https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", - "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", - "dependencies": { - "object-assign": "^4.1.1", - "util": "0.10.3" - } - }, - "node_modules/assert-never": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.2.1.tgz", - "integrity": "sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw==" - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/assert/node_modules/inherits": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", - "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=" - }, - "node_modules/assert/node_modules/util": { - "version": "0.10.3", - "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", - "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", - "dependencies": { - "inherits": "2.0.1" - } - }, - "node_modules/assign-symbols": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", - "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "dependencies": { - "lodash": "^4.17.14" - } - }, - "node_modules/async-each": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", - "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==" - }, - "node_modules/async-limiter": { - "version": "1.0.1", - 
"resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "node_modules/atob": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", - "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", - "bin": { - "atob": "bin/atob.js" - }, - "engines": { - "node": ">= 4.5.0" - } - }, - "node_modules/autocomplete.js": { - "version": "0.36.0", - "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.36.0.tgz", - "integrity": "sha512-jEwUXnVMeCHHutUt10i/8ZiRaCb0Wo+ZyKxeGsYwBDtw6EJHqEeDrq4UwZRD8YBSvp3g6klP678il2eeiVXN2Q==", - "dependencies": { - "immediate": "^3.2.3" - } - }, - "node_modules/autoprefixer": { - "version": "9.8.6", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz", - "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==", - "dependencies": { - "browserslist": "^4.12.0", - "caniuse-lite": "^1.0.30001109", - "colorette": "^1.2.1", - "normalize-range": "^0.1.2", - "num2fraction": "^1.2.2", - "postcss": "^7.0.32", - "postcss-value-parser": "^4.1.0" - }, - "bin": { - "autoprefixer": "bin/autoprefixer" - } - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "engines": { - "node": "*" - } - }, - "node_modules/aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" - }, 
- "node_modules/axios": { - "version": "0.21.1", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", - "dependencies": { - "follow-redirects": "^1.10.0" - } - }, - "node_modules/axios/node_modules/follow-redirects": { - "version": "1.13.3", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.3.tgz", - "integrity": "sha512-DUgl6+HDzB0iEptNQEXLx/KhTmDb8tZUHSeLqpnjpknR70H0nC2t9N73BK6fN4hOvJ84pKlIQVQ4k5FFlBedKA==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/babel-loader": { - "version": "8.2.2", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz", - "integrity": "sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==", - "dependencies": { - "find-cache-dir": "^3.3.1", - "loader-utils": "^1.4.0", - "make-dir": "^3.1.0", - "schema-utils": "^2.6.5" - }, - "engines": { - "node": ">= 8.9" - } - }, - "node_modules/babel-plugin-dynamic-import-node": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", - "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", - "dependencies": { - "object.assign": "^4.1.0" - } - }, - "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz", - "integrity": "sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg==", - "dependencies": { - "@babel/compat-data": "^7.13.11", - "@babel/helper-define-polyfill-provider": "^0.2.0", - "semver": "^6.1.1" - } - }, - "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.2.0", - "resolved": 
"https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz", - "integrity": "sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.2.0", - "core-js-compat": "^3.9.1" - } - }, - "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz", - "integrity": "sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg==", - "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.2.0" - } - }, - "node_modules/babel-walk": { - "version": "3.0.0-canary-5", - "resolved": "https://registry.npmjs.org/babel-walk/-/babel-walk-3.0.0-canary-5.tgz", - "integrity": "sha512-GAwkz0AihzY5bkwIY5QDR+LvsRQgB/B+1foMPvi0FZPMl5fjD7ICiznUiBdLYMH1QYe6vqu4gWYytZOccLouFw==", - "dependencies": { - "@babel/types": "^7.9.6" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/base": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", - "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", - "dependencies": { - "cache-base": "^1.0.1", - "class-utils": "^0.3.5", - "component-emitter": "^1.2.1", - "define-property": "^1.0.0", - "isobject": "^3.0.1", - "mixin-deep": "^1.2.0", - "pascalcase": "^0.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/define-property": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "node_modules/batch": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", - "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=" - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": 
"sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "engines": { - "node": "*" - } - }, - "node_modules/binary-extensions": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", - "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "optional": true, - "dependencies": { - "file-uri-to-path": "1.0.0" - } - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" - }, - "node_modules/bn.js": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.0.tgz", - "integrity": "sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw==" - }, - "node_modules/body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", - "dependencies": { - "bytes": "3.1.0", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" - }, - "engines": { - "node": 
">= 0.8" - } - }, - "node_modules/body-parser/node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/body-parser/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/bonjour": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/bonjour/-/bonjour-3.5.0.tgz", - "integrity": "sha1-jokKGD2O6aI5OzhExpGkK897yfU=", - "dependencies": { - "array-flatten": "^2.1.0", - "deep-equal": "^1.0.1", - "dns-equal": "^1.0.0", - "dns-txt": "^2.0.2", - "multicast-dns": "^6.0.1", - "multicast-dns-service-types": "^1.1.0" - } - }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24=" - }, - "node_modules/boxen": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz", - "integrity": "sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ==", - "dependencies": { - "ansi-align": "^3.0.0", - "camelcase": "^5.3.1", - "chalk": "^3.0.0", - "cli-boxes": "^2.2.0", - "string-width": "^4.1.0", - "term-size": "^2.1.0", - "type-fest": "^0.8.1", - "widest-line": "^3.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - 
"node_modules/boxen/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/boxen/node_modules/chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/boxen/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/boxen/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/boxen/node_modules/has-flag": { - "version": 
"4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/boxen/node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "engines": { 
- "node": ">=8" - } - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", - "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", - "dependencies": { - "arr-flatten": "^1.1.0", - "array-unique": "^0.3.2", - "extend-shallow": "^2.0.1", - "fill-range": "^4.0.0", - "isobject": "^3.0.1", - "repeat-element": "^1.1.2", - "snapdragon": "^0.8.1", - "snapdragon-node": "^2.0.1", - "split-string": "^3.0.2", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" - }, - "node_modules/browserify-aes": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "dependencies": { - "buffer-xor": "^1.0.3", - "cipher-base": "^1.0.0", - "create-hash": "^1.1.0", - "evp_bytestokey": "^1.0.3", - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/browserify-cipher": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", - "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", - "dependencies": { - "browserify-aes": "^1.0.4", - "browserify-des": "^1.0.0", - "evp_bytestokey": "^1.0.0" - } - }, - "node_modules/browserify-des": { - 
"version": "1.0.2", - "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", - "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", - "dependencies": { - "cipher-base": "^1.0.1", - "des.js": "^1.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/browserify-rsa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", - "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", - "dependencies": { - "bn.js": "^5.0.0", - "randombytes": "^2.0.1" - } - }, - "node_modules/browserify-sign": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", - "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", - "dependencies": { - "bn.js": "^5.1.1", - "browserify-rsa": "^4.0.1", - "create-hash": "^1.2.0", - "create-hmac": "^1.1.7", - "elliptic": "^6.5.3", - "inherits": "^2.0.4", - "parse-asn1": "^5.1.5", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - } - }, - "node_modules/browserify-sign/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/browserify-sign/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/browserify-zlib": { - "version": "0.2.0", - "resolved": 
"https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "dependencies": { - "pako": "~1.0.5" - } - }, - "node_modules/browserslist": { - "version": "4.16.6", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.6.tgz", - "integrity": "sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ==", - "dependencies": { - "caniuse-lite": "^1.0.30001219", - "colorette": "^1.2.2", - "electron-to-chromium": "^1.3.723", - "escalade": "^3.1.1", - "node-releases": "^1.1.71" - }, - "bin": { - "browserslist": "cli.js" - }, - "engines": { - "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" - } - }, - "node_modules/browserslist/node_modules/caniuse-lite": { - "version": "1.0.30001228", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz", - "integrity": "sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A==" - }, - "node_modules/browserslist/node_modules/electron-to-chromium": { - "version": "1.3.738", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.738.tgz", - "integrity": "sha512-vCMf4gDOpEylPSLPLSwAEsz+R3ShP02Y3cAKMZvTqule3XcPp7tgc/0ESI7IS6ZeyBlGClE50N53fIOkcIVnpw==" - }, - "node_modules/buffer": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", - "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", - "dependencies": { - "base64-js": "^1.0.2", - "ieee754": "^1.1.4", - "isarray": "^1.0.0" - } - }, - "node_modules/buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" - }, - 
"node_modules/buffer-indexof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-indexof/-/buffer-indexof-1.1.1.tgz", - "integrity": "sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g==" - }, - "node_modules/buffer-json": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/buffer-json/-/buffer-json-2.0.0.tgz", - "integrity": "sha512-+jjPFVqyfF1esi9fvfUs3NqM0pH1ziZ36VP4hmA/y/Ssfo/5w5xHKfTw9BwQjoJ1w/oVtpLomqwUHKdefGyuHw==" - }, - "node_modules/buffer-xor": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" - }, - "node_modules/builtin-status-codes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", - "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=" - }, - "node_modules/bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cac": { - "version": "6.7.2", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.2.tgz", - "integrity": "sha512-w0bH1IF9rEjdi0a6lTtlXYT+vBZEJL9oytaXXRdsD68MH6+SrZGOGsu7s2saHQvYXqwo/wBdkW75tt8wFpj+mw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacache": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz", - "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==", - "dependencies": { - "bluebird": "^3.5.5", - "chownr": "^1.1.1", - "figgy-pudding": "^3.5.1", - "glob": "^7.1.4", - "graceful-fs": "^4.1.15", - "infer-owner": "^1.0.3", - "lru-cache": "^5.1.1", - "mississippi": "^3.0.0", - "mkdirp": "^0.5.1", - "move-concurrently": "^1.0.1", - "promise-inflight": "^1.0.1", - "rimraf": "^2.6.3", - "ssri": "^6.0.1", - "unique-filename": 
"^1.1.1", - "y18n": "^4.0.0" - } - }, - "node_modules/cacache/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/cache-base": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", - "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", - "dependencies": { - "collection-visit": "^1.0.0", - "component-emitter": "^1.2.1", - "get-value": "^2.0.6", - "has-value": "^1.0.0", - "isobject": "^3.0.1", - "set-value": "^2.0.0", - "to-object-path": "^0.3.0", - "union-value": "^1.0.0", - "unset-value": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cache-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/cache-loader/-/cache-loader-3.0.1.tgz", - "integrity": "sha512-HzJIvGiGqYsFUrMjAJNDbVZoG7qQA+vy9AIoKs7s9DscNfki0I589mf2w6/tW+kkFH3zyiknoWV5Jdynu6b/zw==", - "dependencies": { - "buffer-json": "^2.0.0", - "find-cache-dir": "^2.1.0", - "loader-utils": "^1.2.3", - "mkdirp": "^0.5.1", - "neo-async": "^2.6.1", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/cache-loader/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - 
"integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/cache-loader/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/cache-loader/node_modules/pkg-dir": { - "version": "3.0.0", - 
"resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/cache-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/cache-loader/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/lowercase-keys": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cacheable-request/node_modules/normalize-url": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz", - "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "node_modules/call-me-maybe": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", - "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=" - }, - "node_modules/caller-callsite": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz", - "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=", - "dependencies": { - "callsites": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/caller-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz", - "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=", - "dependencies": { - "caller-callsite": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/callsites": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz", - "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=", - "engines": { - "node": ">=4" - } - }, - "node_modules/camel-case": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-3.0.0.tgz", - 
"integrity": "sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M=", - "dependencies": { - "no-case": "^2.2.0", - "upper-case": "^1.1.1" - } - }, - "node_modules/camelcase": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", - "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", - "engines": { - "node": ">=10" - } - }, - "node_modules/caniuse-api": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", - "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-lite": "^1.0.0", - "lodash.memoize": "^4.1.2", - "lodash.uniq": "^4.5.0" - } - }, - "node_modules/caniuse-lite": { - "version": "1.0.30001208", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001208.tgz", - "integrity": "sha512-OE5UE4+nBOro8Dyvv0lfx+SRtfVIOM9uhKqFmJeUbGriqhhStgp1A0OyBpgy3OUF8AhYCT+PVwPC1gMl2ZcQMA==" - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/character-parser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz", - "integrity": "sha1-x84o821LzZdE5f/CxfzeHHMmH8A=", - "dependencies": { - "is-regex": "^1.0.3" - } - }, - "node_modules/cheerio": { - "version": "1.0.0-rc.6", - "resolved": 
"https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.6.tgz", - "integrity": "sha512-hjx1XE1M/D5pAtMgvWwE21QClmAEeGHOIDfycgmndisdNgI6PE1cGRQkMGBcsbUbmEQyWu5PJLUcAOjtQS8DWw==", - "dependencies": { - "cheerio-select": "^1.3.0", - "dom-serializer": "^1.3.1", - "domhandler": "^4.1.0", - "htmlparser2": "^6.1.0", - "parse5": "^6.0.1", - "parse5-htmlparser2-tree-adapter": "^6.0.1" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/cheerio-select": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.3.0.tgz", - "integrity": "sha512-mLgqdHxVOQyhOIkG5QnRkDg7h817Dkf0dAvlCio2TJMmR72cJKH0bF28SHXvLkVrGcGOiub0/Bs/CMnPeQO7qw==", - "dependencies": { - "css-select": "^4.0.0", - "css-what": "^5.0.0", - "domelementtype": "^2.2.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.2" - } - }, - "node_modules/chokidar": { - "version": "2.1.8", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", - "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", - "dependencies": { - "anymatch": "^2.0.0", - "async-each": "^1.0.1", - "braces": "^2.3.2", - "fsevents": "^1.2.7", - "glob-parent": "^3.1.0", - "inherits": "^2.0.3", - "is-binary-path": "^1.0.0", - "is-glob": "^4.0.0", - "normalize-path": "^3.0.0", - "path-is-absolute": "^1.0.0", - "readdirp": "^2.2.1", - "upath": "^1.1.1" - }, - "optionalDependencies": { - "fsevents": "^1.2.7" - } - }, - "node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" - }, - "node_modules/chrome-trace-event": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", - "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==", - "engines": { - 
"node": ">=6.0" - } - }, - "node_modules/ci-info": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.1.1.tgz", - "integrity": "sha512-kdRWLBIJwdsYJWYJFtAFFYxybguqeF91qpZaggjG5Nf8QKdizFG2hjqvaTXbxFIcYbSaD74KpAXv6BSm17DHEQ==" - }, - "node_modules/cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/class-utils": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", - "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", - "dependencies": { - "arr-union": "^3.1.0", - "define-property": "^0.2.5", - "isobject": "^3.0.0", - "static-extend": "^0.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/class-utils/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/clean-css": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz", - "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==", - "dependencies": { - "source-map": "~0.6.0" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/clean-css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/clipboard": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz", - "integrity": "sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==", - "optional": true, - "dependencies": { - "good-listener": "^1.2.2", - "select": "^1.1.2", - "tiny-emitter": "^2.0.0" - } - }, - "node_modules/clipboard-copy": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", - "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" - }, - "node_modules/cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dependencies": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/clone-response": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "dependencies": { - "mimic-response": "^1.0.0" - } - }, - "node_modules/coa": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz", - "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==", - "dependencies": { - "@types/q": "^1.5.1", - "chalk": "^2.4.1", - "q": "^1.1.2" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/collection-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", - "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", - "dependencies": { - "map-visit": "^1.0.0", - "object-visit": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/color": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", - "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", - "dependencies": { - "color-convert": "^1.9.1", - "color-string": "^1.5.4" - } - }, - "node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "node_modules/color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", - "dependencies": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - 
}, - "node_modules/colorette": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz", - "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w==" - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "2.17.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz", - "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==" - }, - "node_modules/commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=" - }, - "node_modules/component-emitter": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", - "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==" - }, - "node_modules/compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dependencies": { - "mime-db": ">= 1.43.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "dependencies": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - 
"debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/compression/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "node_modules/concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "engines": [ - "node >= 0.8" - ], - "dependencies": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } - }, - "node_modules/configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "dependencies": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/connect-history-api-fallback": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz", - "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/consola": { - "version": "2.15.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz", - 
"integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw==" - }, - "node_modules/console-browserify": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", - "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==" - }, - "node_modules/consolidate": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/consolidate/-/consolidate-0.15.1.tgz", - "integrity": "sha512-DW46nrsMJgy9kqAbPt5rKaCr7uFtpo4mSUvLHIUbJEjm0vo+aY5QLwBUq3FK4tRnJr/X0Psc0C4jf/h+HtXSMw==", - "dependencies": { - "bluebird": "^3.1.1" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/constantinople": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-4.0.1.tgz", - "integrity": "sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==", - "dependencies": { - "@babel/parser": "^7.6.0", - "@babel/types": "^7.6.1" - } - }, - "node_modules/constants-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", - "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=" - }, - "node_modules/content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", - "dependencies": { - "safe-buffer": "5.1.2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/convert-source-map": { - 
"version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", - "dependencies": { - "safe-buffer": "~5.1.1" - } - }, - "node_modules/cookie": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "node_modules/copy-concurrently": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz", - "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==", - "dependencies": { - "aproba": "^1.1.1", - "fs-write-stream-atomic": "^1.0.8", - "iferr": "^0.1.5", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.0" - } - }, - "node_modules/copy-concurrently/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/copy-descriptor": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", - "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/copy-webpack-plugin": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.2.tgz", - "integrity": 
"sha512-Uh7crJAco3AjBvgAy9Z75CjK8IG+gxaErro71THQ+vv/bl4HaQcpkexAY8KVW/T6D2W2IRr+couF/knIRkZMIQ==", - "dependencies": { - "cacache": "^12.0.3", - "find-cache-dir": "^2.1.0", - "glob-parent": "^3.1.0", - "globby": "^7.1.1", - "is-glob": "^4.0.1", - "loader-utils": "^1.2.3", - "minimatch": "^3.0.4", - "normalize-path": "^3.0.0", - "p-limit": "^2.2.1", - "schema-utils": "^1.0.0", - "serialize-javascript": "^4.0.0", - "webpack-log": "^2.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/copy-webpack-plugin/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-7.1.1.tgz", - "integrity": "sha1-+yzP+UAfhgCUXfral0QMypcrhoA=", - "dependencies": { - "array-union": "^1.0.1", - "dir-glob": "^2.0.0", - "glob": "^7.1.2", - "ignore": "^3.3.5", - "pify": "^3.0.0", - "slash": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/globby/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/ignore": { - 
"version": "3.3.10", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", - "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" - }, - "node_modules/copy-webpack-plugin/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/copy-webpack-plugin/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/copy-webpack-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/copy-webpack-plugin/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/copy-webpack-plugin/node_modules/slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/core-js": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.10.1.tgz", - "integrity": "sha512-pwCxEXnj27XG47mu7SXAwhLP3L5CrlvCB91ANUkIz40P27kUcvNfSdvyZJ9CLHiVoKSp+TTChMQMSKQEH/IQxA==", - "hasInstallScript": true - }, - "node_modules/core-js-compat": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.10.1.tgz", - "integrity": "sha512-ZHQTdTPkqvw2CeHiZC970NNJcnwzT6YIueDMASKt+p3WbZsLXOcoD392SkcWhkC0wBBHhlfhqGKKsNCQUozYtg==", - "dependencies": { - "browserslist": "^4.16.3", - "semver": "7.0.0" - } - }, - "node_modules/core-js-compat/node_modules/semver": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", - "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/core-util-is": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "node_modules/cosmiconfig": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz", - "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==", - "dependencies": { - "import-fresh": "^2.0.0", - "is-directory": "^0.3.1", - "js-yaml": "^3.13.1", - "parse-json": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/create-ecdh": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", - "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", - "dependencies": { - "bn.js": "^4.1.0", - "elliptic": "^6.5.3" - } - }, - "node_modules/create-ecdh/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "dependencies": { - "cipher-base": "^1.0.1", - "inherits": "^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "node_modules/create-hmac": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "dependencies": { - "cipher-base": "^1.0.3", - "create-hash": "^1.1.0", - "inherits": "^2.0.1", - "ripemd160": "^2.0.0", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "node_modules/cross-spawn": { - "version": 
"6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "dependencies": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - }, - "engines": { - "node": ">=4.8" - } - }, - "node_modules/cross-spawn/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/crypto-browserify": { - "version": "3.12.0", - "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", - "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", - "dependencies": { - "browserify-cipher": "^1.0.0", - "browserify-sign": "^4.0.0", - "create-ecdh": "^4.0.0", - "create-hash": "^1.1.0", - "create-hmac": "^1.1.0", - "diffie-hellman": "^5.0.0", - "inherits": "^2.0.1", - "pbkdf2": "^3.0.3", - "public-encrypt": "^4.0.0", - "randombytes": "^2.0.0", - "randomfill": "^1.0.3" - }, - "engines": { - "node": "*" - } - }, - "node_modules/crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/css": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz", - "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==", - "dependencies": { - "inherits": "^2.0.3", - "source-map": "^0.6.1", - "source-map-resolve": "^0.5.2", - "urix": "^0.1.0" - } - }, 
- "node_modules/css-color-names": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz", - "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=", - "engines": { - "node": "*" - } - }, - "node_modules/css-declaration-sorter": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz", - "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==", - "dependencies": { - "postcss": "^7.0.1", - "timsort": "^0.3.0" - }, - "engines": { - "node": ">4" - } - }, - "node_modules/css-loader": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-2.1.1.tgz", - "integrity": "sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w==", - "dependencies": { - "camelcase": "^5.2.0", - "icss-utils": "^4.1.0", - "loader-utils": "^1.2.3", - "normalize-path": "^3.0.0", - "postcss": "^7.0.14", - "postcss-modules-extract-imports": "^2.0.0", - "postcss-modules-local-by-default": "^2.0.6", - "postcss-modules-scope": "^2.1.0", - "postcss-modules-values": "^2.0.0", - "postcss-value-parser": "^3.3.0", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/css-loader/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/css-loader/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/css-loader/node_modules/schema-utils": { - "version": "1.0.0", - 
"resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/css-parse": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/css-parse/-/css-parse-2.0.0.tgz", - "integrity": "sha1-pGjuZnwW2BzPBcWMONKpfHgNv9Q=", - "dependencies": { - "css": "^2.0.0" - } - }, - "node_modules/css-select": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.0.0.tgz", - "integrity": "sha512-I7favumBlDP/nuHBKLfL5RqvlvRdn/W29evvWJ+TaoGPm7QD+xSIN5eY2dyGjtkUmemh02TZrqJb4B8DWo6PoQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^5.0.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.1", - "nth-check": "^2.0.0" - } - }, - "node_modules/css-select-base-adapter": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz", - "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w==" - }, - "node_modules/css-tree": { - "version": "1.0.0-alpha.37", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz", - "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==", - "dependencies": { - "mdn-data": "2.0.4", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/css-tree/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/css-what": { - "version": "5.0.0", - "resolved": 
"https://registry.npmjs.org/css-what/-/css-what-5.0.0.tgz", - "integrity": "sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", - "bin": { - "cssesc": "bin/cssesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cssnano": { - "version": "4.1.11", - "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz", - "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==", - "dependencies": { - "cosmiconfig": "^5.0.0", - "cssnano-preset-default": "^4.0.8", - "is-resolvable": "^1.0.0", - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-preset-default": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz", - "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==", - "dependencies": { - "css-declaration-sorter": "^4.0.1", - "cssnano-util-raw-cache": "^4.0.1", - "postcss": "^7.0.0", - "postcss-calc": "^7.0.1", - "postcss-colormin": "^4.0.3", - "postcss-convert-values": "^4.0.1", - "postcss-discard-comments": "^4.0.2", - "postcss-discard-duplicates": "^4.0.2", - "postcss-discard-empty": "^4.0.1", - "postcss-discard-overridden": "^4.0.1", - "postcss-merge-longhand": "^4.0.11", - "postcss-merge-rules": "^4.0.3", - 
"postcss-minify-font-values": "^4.0.2", - "postcss-minify-gradients": "^4.0.2", - "postcss-minify-params": "^4.0.2", - "postcss-minify-selectors": "^4.0.2", - "postcss-normalize-charset": "^4.0.1", - "postcss-normalize-display-values": "^4.0.2", - "postcss-normalize-positions": "^4.0.2", - "postcss-normalize-repeat-style": "^4.0.2", - "postcss-normalize-string": "^4.0.2", - "postcss-normalize-timing-functions": "^4.0.2", - "postcss-normalize-unicode": "^4.0.1", - "postcss-normalize-url": "^4.0.1", - "postcss-normalize-whitespace": "^4.0.2", - "postcss-ordered-values": "^4.1.2", - "postcss-reduce-initial": "^4.0.3", - "postcss-reduce-transforms": "^4.0.2", - "postcss-svgo": "^4.0.3", - "postcss-unique-selectors": "^4.0.1" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-get-arguments": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz", - "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-get-match": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz", - "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-raw-cache": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz", - "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/cssnano-util-same-parent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz", - "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==", - "engines": { - 
"node": ">=6.9.0" - } - }, - "node_modules/csso": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", - "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", - "dependencies": { - "css-tree": "^1.1.2" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/css-tree": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz", - "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==", - "dependencies": { - "mdn-data": "2.0.14", - "source-map": "^0.6.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/csso/node_modules/mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" - }, - "node_modules/csso/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cyclist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz", - "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk=" - }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/de-indent": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", - "integrity": "sha1-sgOOhG3DO6pXlhKNCAS0VbjB4h0=" - }, - "node_modules/debug": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/deep-equal": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz", - "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==", - "dependencies": { - "is-arguments": "^1.0.4", - "is-date-object": "^1.0.1", - "is-regex": "^1.0.4", - "object-is": "^1.0.1", - "object-keys": "^1.1.1", - "regexp.prototype.flags": "^1.2.0" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/deepmerge": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-1.5.2.tgz", - "integrity": "sha512-95k0GDqvBjZavkuvzx/YqVLv/6YYa17fz6ILMSf7neqQITCPbnfEnQvEgMPNjH4kgobe7+WIL0yJEHku+H3qtQ==", - "engines": { - 
"node": ">=0.10.0" - } - }, - "node_modules/default-gateway": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-4.2.0.tgz", - "integrity": "sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA==", - "dependencies": { - "execa": "^1.0.0", - "ip-regex": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==" - }, - "node_modules/define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dependencies": { - "object-keys": "^1.0.12" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/define-property": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", - "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", - "dependencies": { - "is-descriptor": "^1.0.2", - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - 
"integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/define-property/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/del": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-4.1.1.tgz", - "integrity": "sha512-QwGuEUouP2kVwQenAsOof5Fv8K9t3D8Ca8NxcXKrIpEHjTXK5J2nXLdP+ALI1cgv8wj7KuwBhTwBkOZSJKM5XQ==", - "dependencies": { - "@types/glob": "^7.1.1", - "globby": "^6.1.0", - "is-path-cwd": "^2.0.0", - "is-path-in-cwd": "^2.0.0", - "p-map": "^2.0.0", - "pify": "^4.0.1", - "rimraf": "^2.6.3" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/del/node_modules/globby": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-6.1.0.tgz", - "integrity": "sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=", - "dependencies": { - "array-union": "^1.0.1", - "glob": "^7.0.3", - "object-assign": "^4.0.1", - "pify": "^2.0.0", - "pinkie-promise": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/del/node_modules/globby/node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "engines": { - "node": ">=0.4.0" - } - }, - 
"node_modules/delegate": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz", - "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==", - "optional": true - }, - "node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/des.js": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", - "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", - "dependencies": { - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "node_modules/detect-node": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.5.tgz", - "integrity": "sha512-qi86tE6hRcFHy8jI1m2VG+LaPUR1LhqDa5G8tVjuUXmOrpuAgqsA1pN0+ldgr3aKUH+QLI9hCY/OcRYisERejw==" - }, - "node_modules/diffie-hellman": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", - "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", - "dependencies": { - "bn.js": "^4.1.0", - "miller-rabin": "^4.0.0", - "randombytes": "^2.0.0" - } - }, - "node_modules/diffie-hellman/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/dir-glob": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.2.2.tgz", - "integrity": 
"sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw==", - "dependencies": { - "path-type": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0=" - }, - "node_modules/dns-packet": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.4.tgz", - "integrity": "sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==", - "dependencies": { - "ip": "^1.1.0", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/dns-txt": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz", - "integrity": "sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=", - "dependencies": { - "buffer-indexof": "^1.0.0" - } - }, - "node_modules/docsearch.js": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/docsearch.js/-/docsearch.js-2.6.3.tgz", - "integrity": "sha512-GN+MBozuyz664ycpZY0ecdQE0ND/LSgJKhTLA0/v3arIS3S1Rpf2OJz6A35ReMsm91V5apcmzr5/kM84cvUg+A==", - "dependencies": { - "algoliasearch": "^3.24.5", - "autocomplete.js": "0.36.0", - "hogan.js": "^3.0.2", - "request": "^2.87.0", - "stack-utils": "^1.0.1", - "to-factory": "^1.0.0", - "zepto": "^1.2.0" - } - }, - "node_modules/docsearch.js/node_modules/algoliasearch": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", - "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", - "dependencies": { - "agentkeepalive": "^2.2.0", - "debug": "^2.6.9", - "envify": "^4.0.0", - "es6-promise": "^4.1.0", - "events": "^1.1.0", - "foreach": "^2.0.5", - "global": "^4.3.2", - "inherits": "^2.0.1", - "isarray": "^2.0.1", - "load-script": "^1.0.0", - "object-keys": "^1.0.11", - "querystring-es3": "^0.2.1", - "reduce": 
"^1.0.1", - "semver": "^5.1.0", - "tunnel-agent": "^0.6.0" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/docsearch.js/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/docsearch.js/node_modules/events": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/docsearch.js/node_modules/isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" - }, - "node_modules/docsearch.js/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/doctypes": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz", - "integrity": "sha1-6oCxBqh1OHdOijpKWv4pPeSJ4Kk=" - }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", - "dependencies": { - "utila": "~0.4" - } - }, - "node_modules/dom-serializer": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.1.tgz", - "integrity": "sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q==", - "dependencies": { - "domelementtype": 
"^2.0.1", - "domhandler": "^4.0.0", - "entities": "^2.0.0" - } - }, - "node_modules/dom-walk": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", - "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" - }, - "node_modules/domain-browser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", - "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", - "engines": { - "node": ">=0.4", - "npm": ">=1.2" - } - }, - "node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/domhandler": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.1.0.tgz", - "integrity": "sha512-/6/kmsGlMY4Tup/nGVutdrK9yQi4YjWVcVeoQmixpzjOUK1U7pQkvAPHBJeUxOgxF0J8f8lwCJSlCfD0V4CMGQ==", - "dependencies": { - "domelementtype": "^2.2.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/domutils": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.5.2.tgz", - "integrity": "sha512-MHTthCb1zj8f1GVfRpeZUbohQf/HdBos0oX5gZcQFepOZPLLRyj6Wn7XS7EMnY7CVpwv8863u2vyE83Hfu28HQ==", - "dependencies": { - "dom-serializer": "^1.0.1", - "domelementtype": "^2.2.0", - "domhandler": "^4.1.0" - } - }, - "node_modules/dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "dependencies": { - "is-obj": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/duplexer3": { - "version": "0.1.4", - "resolved": 
"https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, - "node_modules/duplexify": { - "version": "3.7.1", - "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", - "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", - "dependencies": { - "end-of-stream": "^1.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.0.0", - "stream-shift": "^1.0.0" - } - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "node_modules/elliptic": { - "version": "6.5.4", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", - "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==", - "dependencies": { - "bn.js": "^4.11.9", - "brorand": "^1.1.0", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.1", - "inherits": "^2.0.4", - "minimalistic-assert": "^1.0.1", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "node_modules/elliptic/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/enhanced-resolve": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz", - "integrity": "sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==", - "dependencies": { - "graceful-fs": "^4.1.2", - "memory-fs": "^0.5.0", - "tapable": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/enhanced-resolve/node_modules/memory-fs": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", - "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", - "dependencies": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - }, - "engines": { - "node": ">=4.3.0 <5.0.0 || >=5.10" - } - }, - "node_modules/entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" - }, - "node_modules/envify": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz", - "integrity": 
"sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==", - "dependencies": { - "esprima": "^4.0.0", - "through": "~2.3.4" - }, - "bin": { - "envify": "bin/envify" - } - }, - "node_modules/envinfo": { - "version": "7.8.1", - "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz", - "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==", - "bin": { - "envinfo": "dist/cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/errno": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz", - "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==", - "dependencies": { - "prr": "~1.0.1" - }, - "bin": { - "errno": "cli.js" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-abstract": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", - "dependencies": { - "call-bind": "^1.0.2", - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "get-intrinsic": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", - "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.2", - "string.prototype.trimend": "^1.0.4", - "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-to-primitive": { - "version": "1.2.1", - "resolved": 
"https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dependencies": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es6-promise": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/eslint-scope": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz", - "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", - "dependencies": { - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/esm": { - 
"version": "3.2.25", - "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz", - "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=", - "engines": { - 
"node": ">= 0.6" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "engines": { - "node": ">=0.8.x" - } - }, - "node_modules/eventsource": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz", - "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==", - "dependencies": { - "original": "^1.0.0" - }, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/evp_bytestokey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "dependencies": { - "md5.js": "^1.3.4", - "safe-buffer": "^5.1.1" - } - }, - "node_modules/execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "dependencies": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/expand-brackets": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", - "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", - "dependencies": { - "debug": "^2.3.3", - "define-property": "^0.2.5", - 
"extend-shallow": "^2.0.1", - "posix-character-classes": "^0.1.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expand-brackets/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/expand-brackets/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", - "dependencies": { - "accepts": "~1.3.7", - "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", - "content-type": "~1.0.4", - "cookie": "0.4.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "~1.1.2", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", - "statuses": "~1.5.0", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/array-flatten": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "node_modules/express/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", - "dependencies": { - "is-extendable": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", - "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", - "dependencies": { - "array-unique": "^0.3.2", - "define-property": "^1.0.0", - "expand-brackets": "^2.1.4", - "extend-shallow": "^2.0.1", - "fragment-cache": "^0.2.1", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - 
"integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extglob/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - "node_modules/fast-glob": { - "version": "2.2.7", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz", - "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==", - "dependencies": { - "@mrmlnc/readdir-enhanced": "^2.2.1", - "@nodelib/fs.stat": "^1.1.2", - "glob-parent": "^3.1.0", - "is-glob": "^4.0.0", - "merge2": "^1.2.3", - "micromatch": "^3.1.10" - }, - "engines": { - "node": ">=4.0.0" - } - }, - 
"node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "node_modules/faye-websocket": { - "version": "0.11.3", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.3.tgz", - "integrity": "sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==", - "dependencies": { - "websocket-driver": ">=0.5.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/figgy-pudding": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", - "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==" - }, - "node_modules/figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/file-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-3.0.1.tgz", - "integrity": "sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw==", - "dependencies": { - "loader-utils": "^1.0.2", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - 
"engines": { - "node": ">= 4" - } - }, - "node_modules/file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "optional": true - }, - "node_modules/fill-range": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", - "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", - "dependencies": { - "extend-shallow": "^2.0.1", - "is-number": "^3.0.0", - "repeat-string": "^1.6.1", - "to-regex-range": "^2.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.3", - "statuses": "~1.5.0", - "unpipe": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/find-cache-dir": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", - "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - 
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/flush-write-stream": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", - "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", - "dependencies": { - "inherits": "^2.0.3", - "readable-stream": "^2.3.6" - } - }, - "node_modules/follow-redirects": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz", - "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==", - "dependencies": { - "debug": "=3.1.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/for-in": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/foreach": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", - "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=" - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/forwarded": { - "version": 
"0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fragment-cache": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", - "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", - "dependencies": { - "map-cache": "^0.2.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/from2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", - "dependencies": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - } - }, - "node_modules/fs-extra": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", - "dependencies": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/fs-write-stream-atomic": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", - "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=", - "dependencies": { - "graceful-fs": "^4.1.2", - "iferr": "^0.1.5", - "imurmurhash": "^0.1.4", - "readable-stream": "1 || 2" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "node_modules/fsevents": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", - 
"integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "dependencies": { - "bindings": "^1.5.0", - "nan": "^2.12.1" - }, - "engines": { - "node": ">= 4.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "node_modules/fuse.js": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", - "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz", - "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1" - } - }, - "node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": 
"sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/get-value": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", - "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - } - }, - "node_modules/glob-parent": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", - "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", - "dependencies": { - "is-glob": "^3.1.0", - "path-dirname": "^1.0.0" - } - }, - "node_modules/glob-parent/node_modules/is-glob": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", - "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", - "dependencies": { - "is-extglob": "^2.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/glob-to-regexp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=" - }, - "node_modules/global": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz", - "integrity": 
"sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==", - "dependencies": { - "min-document": "^2.19.0", - "process": "^0.11.10" - } - }, - "node_modules/global-dirs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz", - "integrity": "sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==", - "dependencies": { - "ini": "1.3.7" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/globby": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-9.2.0.tgz", - "integrity": "sha512-ollPHROa5mcxDEkwg6bPt3QbEf4pDQSNtd6JPL1YvOvAo/7/0VAm9TccUeoTmarjPw4pfUthSCqcyfNB1I3ZSg==", - "dependencies": { - "@types/glob": "^7.1.1", - "array-union": "^1.0.2", - "dir-glob": "^2.2.2", - "fast-glob": "^2.2.6", - "glob": "^7.1.3", - "ignore": "^4.0.3", - "pify": "^4.0.1", - "slash": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/good-listener": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz", - "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=", - "optional": true, - "dependencies": { - "delegate": "^3.1.2" - } - }, - "node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": 
"^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", - "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" - }, - "node_modules/gray-matter": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.2.tgz", - "integrity": "sha512-7hB/+LxrOjq/dd8APlK0r24uL/67w7SkYnfwhNFwg/VDIGWGmduTDYf3WNstLW2fbbmRwrDGCVSJ2isuf2+4Hw==", - "dependencies": { - "js-yaml": "^3.11.0", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/handle-thing": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" - }, - "node_modules/har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 
0.4.0" - } - }, - "node_modules/has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-bigints": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==" - }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/has-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/has-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", - "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", - "dependencies": { - "get-value": "^2.0.6", - "has-values": "^1.0.0", - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-values": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", - "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", - "dependencies": { - "is-number": "^3.0.0", - "kind-of": "^4.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-values/node_modules/kind-of": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", - "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-yarn": { - 
"version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/hash-base": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "dependencies": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/hash-base/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/hash-base/node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/hash-sum": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/hash-sum/-/hash-sum-1.0.2.tgz", - "integrity": "sha1-M7QHd3VMZDJXPBIMw4CLvRDUfwQ=" - }, - "node_modules/hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "dependencies": { - "inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": 
"sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "bin": { - "he": "bin/he" - } - }, - "node_modules/hex-color-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", - "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" - }, - "node_modules/hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "dependencies": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "node_modules/hogan.js": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz", - "integrity": "sha1-TNnhq9QpQUbnZ55B14mHMrAse/0=", - "dependencies": { - "mkdirp": "0.3.0", - "nopt": "1.0.10" - }, - "bin": { - "hulk": "bin/hulk" - } - }, - "node_modules/hogan.js/node_modules/mkdirp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz", - "integrity": "sha1-G79asbqCevI1dRQ0kEJkVfSB/h4=", - "engines": { - "node": "*" - } - }, - "node_modules/hotkeys-js": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz", - "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw==" - }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" - } - }, - "node_modules/hsl-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz", - "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4=" - }, - "node_modules/hsla-regex": { - "version": 
"1.0.0", - "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz", - "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg=" - }, - "node_modules/html-entities": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.4.0.tgz", - "integrity": "sha512-8nxjcBcd8wovbeKx7h3wTji4e6+rhaVuPNpMqwWgnHh+N9ToqsCs6XztWRBPQ+UtzsoMAdKZtUENoVzU/EMtZA==" - }, - "node_modules/html-minifier": { - "version": "3.5.21", - "resolved": "https://registry.npmjs.org/html-minifier/-/html-minifier-3.5.21.tgz", - "integrity": "sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA==", - "dependencies": { - "camel-case": "3.0.x", - "clean-css": "4.2.x", - "commander": "2.17.x", - "he": "1.2.x", - "param-case": "2.1.x", - "relateurl": "0.2.x", - "uglify-js": "3.4.x" - }, - "bin": { - "html-minifier": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/html-tags": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", - "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/htmlparser2": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", - "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", - "dependencies": { - "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", - "domutils": "^2.5.2", - "entities": "^2.0.0" - } - }, - "node_modules/http-cache-semantics": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": 
"https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc=" - }, - "node_modules/http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/http-errors/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/http-parser-js": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.3.tgz", - "integrity": "sha512-t7hjvef/5HEK7RWTdUzVUhl8zkEu+LlaE0IYzdMuvbSDipxBRpOn4Uhw8ZyECEa808iVT8XCjzo6xmYt4CiLZg==" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/http-proxy-middleware": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz", - "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==", - "dependencies": { - "http-proxy": "^1.17.0", - "is-glob": "^4.0.0", - "lodash": "^4.17.11", - "micromatch": "^3.1.10" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, - "node_modules/https-browserify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", - "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=" - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/icss-replace-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz", - "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0=" - }, - "node_modules/icss-utils": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz", - "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==", - "dependencies": { - "postcss": "^7.0.14" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" - }, - "node_modules/iferr": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz", - "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE=" - }, - "node_modules/ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": 
"sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/immediate": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", - "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" - }, - "node_modules/import-cwd": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-2.1.0.tgz", - "integrity": "sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=", - "dependencies": { - "import-from": "^2.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-fresh": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz", - "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=", - "dependencies": { - "caller-path": "^2.0.0", - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-from": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-from/-/import-from-2.1.0.tgz", - "integrity": "sha1-M1238qev/VOqpHHUuAId7ja387E=", - "dependencies": { - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=", - "engines": { - "node": ">=4" - } - }, - "node_modules/import-local": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz", - "integrity": "sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==", - "dependencies": { - "pkg-dir": "^3.0.0", - "resolve-cwd": "^2.0.0" - }, - "bin": { - "import-local-fixture": "fixtures/cli.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/find-up": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/import-local/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/import-local/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/indexes-of": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", - "integrity": 
"sha1-8w9xbI4r00bHtn0985FVZqfAVgc=" - }, - "node_modules/infer-owner": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", - "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==" - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==" - }, - "node_modules/internal-ip": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-4.3.0.tgz", - "integrity": "sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg==", - "dependencies": { - "default-gateway": "^4.2.0", - "ipaddr.js": "^1.9.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/ip": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", - "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=" - }, - "node_modules/ip-regex": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-2.1.0.tgz", - "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=", - "engines": { - "node": ">=4" - } - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - 
"engines": { - "node": ">= 0.10" - } - }, - "node_modules/is-absolute-url": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", - "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-accessor-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", - "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-accessor-descriptor/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-arguments": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz", - "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==", - "dependencies": { - "call-bind": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" - }, - "node_modules/is-bigint": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz", - "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==" - }, - "node_modules/is-binary-path": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", - "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", - "dependencies": { - "binary-extensions": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/is-boolean-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz", - "integrity": "sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==", - "dependencies": { - "call-bind": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "node_modules/is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-ci/node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" - }, - "node_modules/is-color-stop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz", - "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=", - "dependencies": { - "css-color-names": "^0.0.4", - "hex-color-regex": "^1.1.0", - "hsl-regex": "^1.0.0", - "hsla-regex": "^1.0.0", - "rgb-regex": "^1.0.1", - "rgba-regex": "^1.0.0" - } - }, - "node_modules/is-core-module": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz", - 
"integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==", - "dependencies": { - "has": "^1.0.3" - } - }, - "node_modules/is-data-descriptor": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", - "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-data-descriptor/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-descriptor": { - "version": "0.1.6", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", - "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", - "dependencies": { - "is-accessor-descriptor": "^0.1.6", - "is-data-descriptor": "^0.1.4", - "kind-of": "^5.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-descriptor/node_modules/kind-of": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", - "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-directory": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz", - "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=", - "engines": { - "node": 
">=0.10.0" - } - }, - "node_modules/is-expression": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-4.0.0.tgz", - "integrity": "sha512-zMIXX63sxzG3XrkHkrAPvm/OVZVSCPNkwMHU8oTX7/U3AL78I0QXCEICXUM13BIa8TYGZ68PiTKfQz3yaTNr4A==", - "dependencies": { - "acorn": "^7.1.1", - "object-assign": "^4.1.1" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-installed-globally": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", - "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", - "dependencies": { - "global-dirs": "^2.0.1", - "is-path-inside": "^3.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-installed-globally/node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - 
"integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-negative-zero": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", - "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-npm": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz", - "integrity": "sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-number": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", - "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-number-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz", - "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-number/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "resolved": 
"https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-in-cwd": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-2.1.0.tgz", - "integrity": "sha512-rNocXHgipO+rvnP6dk3zI20RpOtrAM/kzbB258Uw5BWr3TpXi861yzjo16Dn4hUox07iw5AyeMLHWsujkjzvRQ==", - "dependencies": { - "is-path-inside": "^2.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-path-inside": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-2.1.0.tgz", - "integrity": "sha512-wiyhTzfDWsvwAW53OBWF5zuvaOGlZ6PwYxAbPVDhpm+gM09xKQGjBq/8uYN12aDvMxnAnq3dxTyoSoRNmg5YFg==", - "dependencies": { - "path-is-inside": "^1.0.2" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-plain-object": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", - "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" - }, - "node_modules/is-regex": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz", - "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==", - 
"dependencies": { - "call-bind": "^1.0.2", - "has-symbols": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-resolvable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", - "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" - }, - "node_modules/is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", - "dependencies": { - "has-symbols": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "node_modules/is-windows": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", - "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-wsl": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", - "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", - "engines": { - "node": ">=4" - } - }, - "node_modules/is-yarn-global": { - "version": "0.3.0", - "resolved": 
"https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==" - }, - "node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "node_modules/javascript-stringify": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz", - "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=" - }, - "node_modules/js-base64": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz", - "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ==" - }, - "node_modules/js-stringify": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz", - "integrity": "sha1-Fzb939lyTyijaCrcYjCufk6Weds=" - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": 
"sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "bin": { - "jsesc": "bin/jsesc" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" - }, - "node_modules/json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" - }, - "node_modules/json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "node_modules/json3": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.3.tgz", - "integrity": 
"sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA==" - }, - "node_modules/json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dependencies": { - "minimist": "^1.2.0" - }, - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "dependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/jsonp/-/jsonp-0.2.1.tgz", - "integrity": "sha1-pltPoPEL2nGaBUQep7lMVfPhW64=", - "dependencies": { - "debug": "^2.1.3" - } - }, - "node_modules/jsonp/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "node_modules/jstransformer": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz", - "integrity": "sha1-7Yvwkh4vPx7U1cGkT2hwntJHIsM=", - "dependencies": { - "is-promise": "^2.0.0", - "promise": "^7.0.1" - } - }, - "node_modules/keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", 
- "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/killable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/killable/-/killable-1.0.1.tgz", - "integrity": "sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg==" - }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/last-call-webpack-plugin": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz", - "integrity": "sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w==", - "dependencies": { - "lodash": "^4.17.5", - "webpack-sources": "^1.1.0" - } - }, - "node_modules/latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "dependencies": { - "package-json": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/linkify-it": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", - "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/load-script": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz", - "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ=" - }, - "node_modules/loader-runner": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz", - "integrity": 
"sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==", - "engines": { - "node": ">=4.3.0 <5.0.0 || >=5.10" - } - }, - "node_modules/loader-utils": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", - "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", - "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^1.0.1" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash._reinterpolate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0=" - }, - "node_modules/lodash.chunk": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz", - "integrity": "sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw=" - }, - "node_modules/lodash.clonedeep": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", - "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=" - }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" - }, - "node_modules/lodash.kebabcase": { - "version": 
"4.1.1", - "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz", - "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY=" - }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=" - }, - "node_modules/lodash.padstart": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz", - "integrity": "sha1-0uPuv/DZ05rVD1y9G1KnvOa7YRs=" - }, - "node_modules/lodash.sortby": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", - "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=" - }, - "node_modules/lodash.template": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", - "dependencies": { - "lodash._reinterpolate": "^3.0.0", - "lodash.templatesettings": "^4.0.0" - } - }, - "node_modules/lodash.templatesettings": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", - "dependencies": { - "lodash._reinterpolate": "^3.0.0" - } - }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" - }, - "node_modules/loglevel": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz", - "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/lower-case": { - "version": "1.1.4", - 
"resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz", - "integrity": "sha1-miyr0bno4K6ZOkv31YdcOcQujqw=" - }, - "node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dependencies": { - "yallist": "^3.0.2" - } - }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/map-cache": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", - "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/map-visit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", - "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", - "dependencies": { - "object-visit": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/markdown-it": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.4.tgz", - "integrity": "sha512-34RwOXZT8kyuOJy25oJNJoulO8L0bTHYWXcdZBYZqFnjIy3NgjeoM3FmPXIOFQ26/lSHYMr8oc62B6adxXcb3Q==", - "dependencies": { - "argparse": "^2.0.1", - "entities": "~2.1.0", - "linkify-it": "^3.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } 
- }, - "node_modules/markdown-it-anchor": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz", - "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==" - }, - "node_modules/markdown-it-attrs": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz", - "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/markdown-it-chain": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/markdown-it-chain/-/markdown-it-chain-1.3.0.tgz", - "integrity": "sha512-XClV8I1TKy8L2qsT9iX3qiV+50ZtcInGXI80CA+DP62sMs7hXlyV/RM3hfwy5O3Ad0sJm9xIwQELgANfESo8mQ==", - "dependencies": { - "webpack-chain": "^4.9.0" - }, - "engines": { - "node": ">=6.9" - } - }, - "node_modules/markdown-it-chain/node_modules/webpack-chain": { - "version": "4.12.1", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-4.12.1.tgz", - "integrity": "sha512-BCfKo2YkDe2ByqkEWe1Rw+zko4LsyS75LVr29C6xIrxAg9JHJ4pl8kaIZ396SUSNp6b4815dRZPSTAS8LlURRQ==", - "dependencies": { - "deepmerge": "^1.5.2", - "javascript-stringify": "^1.6.0" - } - }, - "node_modules/markdown-it-container": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-it-container/-/markdown-it-container-2.0.0.tgz", - "integrity": "sha1-ABm0P9Au7+zi8ZYKKJX7qBpARpU=" - }, - "node_modules/markdown-it-emoji": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/markdown-it-emoji/-/markdown-it-emoji-1.4.0.tgz", - "integrity": "sha1-m+4OmpkKljupbfaYDE/dsF37Tcw=" - }, - "node_modules/markdown-it-table-of-contents": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/markdown-it-table-of-contents/-/markdown-it-table-of-contents-0.4.4.tgz", - "integrity": 
"sha512-TAIHTHPwa9+ltKvKPWulm/beozQU41Ab+FIefRaQV1NRnpzwcV9QOe6wXQS5WLivm5Q/nlo0rl6laGkMDZE7Gw==", - "engines": { - "node": ">6.4.0" - } - }, - "node_modules/markdown-it/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/markdown-it/node_modules/linkify-it": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz", - "integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "dependencies": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/mdn-data": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", - "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==" - }, - "node_modules/mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4=" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memory-fs": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz", - "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=", - "dependencies": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - } - }, - 
"node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "node_modules/merge-source-map": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz", - "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==", - "dependencies": { - "source-map": "^0.6.1" - } - }, - "node_modules/merge-source-map/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/micromatch": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", - "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", - "dependencies": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "braces": "^2.3.1", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "extglob": "^2.0.4", - "fragment-cache": "^0.2.1", - "kind-of": "^6.0.2", - "nanomatch": "^1.2.9", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/micromatch/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/micromatch/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/miller-rabin": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", - "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", - "dependencies": { - "bn.js": "^4.0.0", - "brorand": "^1.0.1" - }, - "bin": { - "miller-rabin": "bin/miller-rabin" - } - }, - "node_modules/miller-rabin/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/mime": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", - "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": 
"2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", - "dependencies": { - "mime-db": "1.47.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/min-document": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz", - "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=", - "dependencies": { - "dom-walk": "^0.1.0" - } - }, - "node_modules/mini-css-extract-plugin": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-0.6.0.tgz", - "integrity": "sha512-79q5P7YGI6rdnVyIAV4NXpBQJFWdkzJxCim3Kog4078fM0piAaFlwocqbejdWtLW1cEzCexPrh6EdyFsPgVdAw==", - "dependencies": { - "loader-utils": "^1.1.0", - "normalize-url": "^2.0.1", - "schema-utils": "^1.0.0", - "webpack-sources": "^1.1.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/mini-css-extract-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" 
- }, - "node_modules/minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" - }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" - }, - "node_modules/mississippi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz", - "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==", - "dependencies": { - "concat-stream": "^1.5.0", - "duplexify": "^3.4.2", - "end-of-stream": "^1.1.0", - "flush-write-stream": "^1.0.0", - "from2": "^2.1.0", - "parallel-transform": "^1.1.0", - "pump": "^3.0.0", - "pumpify": "^1.3.3", - "stream-each": "^1.1.0", - "through2": "^2.0.0" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/mixin-deep": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", - "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", - "dependencies": { - "for-in": "^1.0.2", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/mixin-deep/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": 
"sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/move-concurrently": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", - "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=", - "dependencies": { - "aproba": "^1.1.1", - "copy-concurrently": "^1.0.0", - "fs-write-stream-atomic": "^1.0.8", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.3" - } - }, - "node_modules/move-concurrently/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/multicast-dns": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-6.2.3.tgz", - "integrity": "sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==", - "dependencies": { - "dns-packet": "^1.3.1", - "thunky": "^1.0.2" - }, - "bin": { - "multicast-dns": "cli.js" - } - }, - "node_modules/multicast-dns-service-types": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz", - "integrity": 
"sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" - }, - "node_modules/nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", - "optional": true - }, - "node_modules/nanomatch": { - "version": "1.2.13", - "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", - "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", - "dependencies": { - "arr-diff": "^4.0.0", - "array-unique": "^0.3.2", - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "fragment-cache": "^0.2.1", - "is-windows": "^1.0.2", - "kind-of": "^6.0.2", - "object.pick": "^1.3.0", - "regex-not": "^1.0.0", - "snapdragon": "^0.8.1", - "to-regex": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/nanomatch/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/nanomatch/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/negotiator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": 
"https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" - }, - "node_modules/nice-try": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "node_modules/no-case": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/no-case/-/no-case-2.3.2.tgz", - "integrity": "sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==", - "dependencies": { - "lower-case": "^1.1.1" - } - }, - "node_modules/node-forge": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz", - "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==", - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/node-libs-browser": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz", - "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==", - "dependencies": { - "assert": "^1.1.1", - "browserify-zlib": "^0.2.0", - "buffer": "^4.3.0", - "console-browserify": "^1.1.0", - "constants-browserify": "^1.0.0", - "crypto-browserify": "^3.11.0", - "domain-browser": "^1.1.1", - "events": "^3.0.0", - "https-browserify": "^1.0.0", - "os-browserify": "^0.3.0", - "path-browserify": "0.0.1", - "process": "^0.11.10", - "punycode": "^1.2.4", - "querystring-es3": "^0.2.0", - "readable-stream": "^2.3.3", - "stream-browserify": "^2.0.1", - "stream-http": "^2.7.2", - "string_decoder": "^1.0.0", - "timers-browserify": "^2.0.4", - "tty-browserify": "0.0.0", - "url": "^0.11.0", - "util": "^0.11.0", - "vm-browserify": "^1.0.1" - } - }, - 
"node_modules/node-libs-browser/node_modules/punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - }, - "node_modules/node-releases": { - "version": "1.1.71", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.71.tgz", - "integrity": "sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg==" - }, - "node_modules/nopt": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", - "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=", - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - } - }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/normalize-url": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", - "dependencies": { - "prepend-http": "^2.0.0", - "query-string": "^5.0.1", - "sort-keys": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "dependencies": { - "path-key": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/nprogress": { - "version": "0.2.0", - 
"resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", - "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E=" - }, - "node_modules/nth-check": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.0.tgz", - "integrity": "sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q==", - "dependencies": { - "boolbase": "^1.0.0" - } - }, - "node_modules/num2fraction": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", - "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=" - }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "engines": { - "node": "*" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", - "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", - "dependencies": { - "copy-descriptor": "^0.1.0", - "define-property": "^0.2.5", - "kind-of": "^3.0.3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-copy/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - 
"dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.9.0.tgz", - "integrity": "sha512-i3Bp9iTqwhaLZBxGkRfo5ZbE07BQRT7MGu8+nNgwW9ItGp1TzCTw2DLEoWwjClxBjOFI/hWljTAmYGCEwmtnOw==" - }, - "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object-visit": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", - "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", - "dependencies": { - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object.assign": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", - "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", - "dependencies": { - "call-bind": "^1.0.0", - "define-properties": "^1.1.3", - "has-symbols": "^1.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/object.getownpropertydescriptors": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz", - "integrity": 
"sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/object.pick": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", - "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", - "dependencies": { - "isobject": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/obuf": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", - "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==" - }, - "node_modules/on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dependencies": { - "wrappy": "1" - } - }, - 
"node_modules/opencollective-postinstall": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz", - "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q==", - "bin": { - "opencollective-postinstall": "index.js" - } - }, - "node_modules/opn": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz", - "integrity": "sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==", - "dependencies": { - "is-wsl": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/optimize-css-assets-webpack-plugin": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz", - "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==", - "dependencies": { - "cssnano": "^4.1.10", - "last-call-webpack-plugin": "^3.0.0" - } - }, - "node_modules/original": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/original/-/original-1.0.2.tgz", - "integrity": "sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg==", - "dependencies": { - "url-parse": "^1.4.3" - } - }, - "node_modules/os-browserify": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", - "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" - }, - "node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": 
"sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-map": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", - "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/p-retry": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-3.0.1.tgz", - "integrity": "sha512-XE6G4+YTTkT2a0UWb2kjZe8xNwf8bIbnqpc/IS/idOBVhyves0mK5OJgeocjx7q5pvX/6m23xuzVPYT1uGM73w==", - "dependencies": { - "retry": "^0.12.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "dependencies": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - }, - "engines": { - "node": ">=8" - } 
- }, - "node_modules/pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" - }, - "node_modules/parallel-transform": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz", - "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==", - "dependencies": { - "cyclist": "^1.0.1", - "inherits": "^2.0.3", - "readable-stream": "^2.1.5" - } - }, - "node_modules/param-case": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/param-case/-/param-case-2.1.1.tgz", - "integrity": "sha1-35T9jPZTHs915r75oIWPvHK+Ikc=", - "dependencies": { - "no-case": "^2.2.0" - } - }, - "node_modules/parse-asn1": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", - "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", - "dependencies": { - "asn1.js": "^5.2.0", - "browserify-aes": "^1.0.0", - "evp_bytestokey": "^1.0.0", - "pbkdf2": "^3.0.3", - "safe-buffer": "^5.1.1" - } - }, - "node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/parse5": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", - "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==" - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", - 
"integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", - "dependencies": { - "parse5": "^6.0.1" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/pascalcase": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", - "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-browserify": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", - "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==" - }, - "node_modules/path-dirname": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", - "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=" - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" - }, - "node_modules/path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - 
"integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "engines": { - "node": ">=4" - } - }, - "node_modules/path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "node_modules/path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dependencies": { - "pify": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/path-type/node_modules/pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", - "engines": { - "node": ">=4" - } - }, - "node_modules/pbkdf2": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz", - "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==", - "dependencies": { - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4", - "ripemd160": "^2.0.1", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - }, - "engines": { - "node": ">=0.12" - } - }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "node_modules/picomatch": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", - "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==", - "optional": true, - "engines": { 
- "node": ">=8.6" - } - }, - "node_modules/pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pinkie": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", - "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pinkie-promise": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", - "dependencies": { - "pinkie": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/portfinder": { - "version": "1.0.28", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz", - "integrity": "sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==", - "dependencies": { - "async": "^2.6.2", - "debug": "^3.1.1", - "mkdirp": "^0.5.5" - }, - "engines": { - "node": ">= 0.12.0" - } - }, - "node_modules/portfinder/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/portfinder/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": 
"sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/portfinder/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/posix-character-classes": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", - "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", - "dependencies": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/postcss-calc": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", - "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", - "dependencies": { - "postcss": "^7.0.27", - "postcss-selector-parser": "^6.0.2", - "postcss-value-parser": "^4.0.2" - } - }, - "node_modules/postcss-colormin": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz", - "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==", - "dependencies": { - "browserslist": "^4.0.0", - "color": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-colormin/node_modules/postcss-value-parser": { - 
"version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-convert-values": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz", - "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-convert-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-discard-comments": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz", - "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-duplicates": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz", - "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-empty": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz", - "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==", 
- "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-discard-overridden": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz", - "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-load-config": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz", - "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==", - "dependencies": { - "cosmiconfig": "^5.0.0", - "import-cwd": "^2.0.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/postcss-loader": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-3.0.0.tgz", - "integrity": "sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA==", - "dependencies": { - "loader-utils": "^1.1.0", - "postcss": "^7.0.0", - "postcss-load-config": "^2.0.0", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/postcss-merge-longhand": { - "version": "4.0.11", - "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz", - "integrity": 
"sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==", - "dependencies": { - "css-color-names": "0.0.4", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "stylehacks": "^4.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-merge-longhand/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-merge-rules": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz", - "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "cssnano-util-same-parent": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0", - "vendors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-merge-rules/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss-minify-font-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz", - "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - 
"node_modules/postcss-minify-font-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-gradients": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz", - "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "is-color-stop": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-gradients/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-minify-params": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz", - "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "browserslist": "^4.0.0", - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "uniqs": "^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-params/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - 
"node_modules/postcss-minify-selectors": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz", - "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-minify-selectors/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss-modules-extract-imports": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz", - "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==", - "dependencies": { - "postcss": "^7.0.5" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-local-by-default": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz", - "integrity": "sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA==", - "dependencies": { - "postcss": "^7.0.6", - "postcss-selector-parser": "^6.0.0", - "postcss-value-parser": "^3.3.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-local-by-default/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - 
"integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-modules-scope": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz", - "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==", - "dependencies": { - "postcss": "^7.0.6", - "postcss-selector-parser": "^6.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/postcss-modules-values": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz", - "integrity": "sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w==", - "dependencies": { - "icss-replace-symbols": "^1.1.0", - "postcss": "^7.0.6" - } - }, - "node_modules/postcss-normalize-charset": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz", - "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==", - "dependencies": { - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-display-values": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz", - "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-display-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": 
"sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-positions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz", - "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-positions/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-repeat-style": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz", - "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-repeat-style/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-string": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz", - "integrity": 
"sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==", - "dependencies": { - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-string/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-timing-functions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz", - "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-timing-functions/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-unicode": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz", - "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==", - "dependencies": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-unicode/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": 
"https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-url": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz", - "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==", - "dependencies": { - "is-absolute-url": "^2.0.0", - "normalize-url": "^3.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-url/node_modules/normalize-url": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz", - "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/postcss-normalize-url/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-normalize-whitespace": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz", - "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-normalize-whitespace/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": 
"sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-ordered-values": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz", - "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==", - "dependencies": { - "cssnano-util-get-arguments": "^4.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-ordered-values/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-reduce-initial": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz", - "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==", - "dependencies": { - "browserslist": "^4.0.0", - "caniuse-api": "^3.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-reduce-transforms": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz", - "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==", - "dependencies": { - "cssnano-util-get-match": "^4.0.0", - "has": "^1.0.0", - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-reduce-transforms/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - 
"integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-safe-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-4.0.2.tgz", - "integrity": "sha512-Uw6ekxSWNLCPesSv/cmqf2bY/77z11O7jZGPax3ycZMFU/oi2DMH9i89AdHc1tRwFg/arFoEwX0IS3LCUxJh1g==", - "dependencies": { - "postcss": "^7.0.26" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/postcss-selector-parser": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", - "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", - "dependencies": { - "cssesc": "^3.0.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1", - "util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/postcss-svgo": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz", - "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==", - "dependencies": { - "postcss": "^7.0.0", - "postcss-value-parser": "^3.0.0", - "svgo": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-svgo/node_modules/postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - }, - "node_modules/postcss-unique-selectors": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz", - "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==", - "dependencies": { - "alphanum-sort": "^1.0.0", - "postcss": "^7.0.0", - "uniqs": 
"^2.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/postcss-value-parser": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz", - "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ==" - }, - "node_modules/postcss/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/postcss/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", - "engines": { - "node": ">=4" - } - }, - "node_modules/prettier": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz", - "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", - "optional": true, - "bin": { - "prettier": "bin-prettier.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/pretty-error": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz", - "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==", - "dependencies": { - "lodash": "^4.17.20", - "renderkid": "^2.0.4" - } - }, - "node_modules/pretty-time": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", - "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/prismjs": { - "version": "1.23.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz", - "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==", - "dependencies": { - "clipboard": "^2.0.0" - }, - "optionalDependencies": { - "clipboard": "^2.0.0" - } - }, - "node_modules/process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", - "engines": { - "node": ">= 0.6.0" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "node_modules/promise": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", - "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", - "dependencies": { - "asap": "~2.0.3" - } - }, - "node_modules/promise-inflight": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", - "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=" - }, - "node_modules/proxy-addr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", - "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", - "dependencies": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/prr": { - "version": "1.0.1", - "resolved": 
"https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", - "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=" - }, - "node_modules/pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=" - }, - "node_modules/psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "node_modules/public-encrypt": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", - "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", - "dependencies": { - "bn.js": "^4.1.0", - "browserify-rsa": "^4.0.0", - "create-hash": "^1.1.0", - "parse-asn1": "^5.0.0", - "randombytes": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "node_modules/public-encrypt/node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" - }, - "node_modules/pug": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pug/-/pug-3.0.2.tgz", - "integrity": "sha512-bp0I/hiK1D1vChHh6EfDxtndHji55XP/ZJKwsRqrz6lRia6ZC2OZbdAymlxdVFwd1L70ebrVJw4/eZ79skrIaw==", - "dependencies": { - "pug-code-gen": "^3.0.2", - "pug-filters": "^4.0.0", - "pug-lexer": "^5.0.1", - "pug-linker": "^4.0.0", - "pug-load": "^3.0.0", - "pug-parser": "^6.0.0", - "pug-runtime": "^3.0.1", - "pug-strip-comments": "^2.0.0" - } - }, - "node_modules/pug-attrs": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-3.0.0.tgz", - "integrity": "sha512-azINV9dUtzPMFQktvTXciNAfAuVh/L/JCl0vtPCwvOA21uZrC08K/UnmrL+SXGEVc1FwzjW62+xw5S/uaLj6cA==", - "dependencies": { - "constantinople": "^4.0.1", - 
"js-stringify": "^1.0.2", - "pug-runtime": "^3.0.0" - } - }, - "node_modules/pug-code-gen": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-3.0.2.tgz", - "integrity": "sha512-nJMhW16MbiGRiyR4miDTQMRWDgKplnHyeLvioEJYbk1RsPI3FuA3saEP8uwnTb2nTJEKBU90NFVWJBk4OU5qyg==", - "dependencies": { - "constantinople": "^4.0.1", - "doctypes": "^1.1.0", - "js-stringify": "^1.0.2", - "pug-attrs": "^3.0.0", - "pug-error": "^2.0.0", - "pug-runtime": "^3.0.0", - "void-elements": "^3.1.0", - "with": "^7.0.0" - } - }, - "node_modules/pug-error": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-2.0.0.tgz", - "integrity": "sha512-sjiUsi9M4RAGHktC1drQfCr5C5eriu24Lfbt4s+7SykztEOwVZtbFk1RRq0tzLxcMxMYTBR+zMQaG07J/btayQ==" - }, - "node_modules/pug-filters": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-4.0.0.tgz", - "integrity": "sha512-yeNFtq5Yxmfz0f9z2rMXGw/8/4i1cCFecw/Q7+D0V2DdtII5UvqE12VaZ2AY7ri6o5RNXiweGH79OCq+2RQU4A==", - "dependencies": { - "constantinople": "^4.0.1", - "jstransformer": "1.0.0", - "pug-error": "^2.0.0", - "pug-walk": "^2.0.0", - "resolve": "^1.15.1" - } - }, - "node_modules/pug-lexer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-5.0.1.tgz", - "integrity": "sha512-0I6C62+keXlZPZkOJeVam9aBLVP2EnbeDw3An+k0/QlqdwH6rv8284nko14Na7c0TtqtogfWXcRoFE4O4Ff20w==", - "dependencies": { - "character-parser": "^2.2.0", - "is-expression": "^4.0.0", - "pug-error": "^2.0.0" - } - }, - "node_modules/pug-linker": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-4.0.0.tgz", - "integrity": "sha512-gjD1yzp0yxbQqnzBAdlhbgoJL5qIFJw78juN1NpTLt/mfPJ5VgC4BvkoD3G23qKzJtIIXBbcCt6FioLSFLOHdw==", - "dependencies": { - "pug-error": "^2.0.0", - "pug-walk": "^2.0.0" - } - }, - "node_modules/pug-load": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/pug-load/-/pug-load-3.0.0.tgz", - "integrity": "sha512-OCjTEnhLWZBvS4zni/WUMjH2YSUosnsmjGBB1An7CsKQarYSWQ0GCVyd4eQPMFJqZ8w9xgs01QdiZXKVjk92EQ==", - "dependencies": { - "object-assign": "^4.1.1", - "pug-walk": "^2.0.0" - } - }, - "node_modules/pug-parser": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-6.0.0.tgz", - "integrity": "sha512-ukiYM/9cH6Cml+AOl5kETtM9NR3WulyVP2y4HOU45DyMim1IeP/OOiyEWRr6qk5I5klpsBnbuHpwKmTx6WURnw==", - "dependencies": { - "pug-error": "^2.0.0", - "token-stream": "1.0.0" - } - }, - "node_modules/pug-plain-loader": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.1.0.tgz", - "integrity": "sha512-1nYgIJLaahRuHJHhzSPODV44aZfb00bO7kiJiMkke6Hj4SVZftuvx6shZ4BOokk50dJc2RSFqNUBOlus0dniFQ==", - "dependencies": { - "loader-utils": "^1.1.0" - } - }, - "node_modules/pug-runtime": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-3.0.1.tgz", - "integrity": "sha512-L50zbvrQ35TkpHwv0G6aLSuueDRwc/97XdY8kL3tOT0FmhgG7UypU3VztfV/LATAvmUfYi4wNxSajhSAeNN+Kg==" - }, - "node_modules/pug-strip-comments": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-2.0.0.tgz", - "integrity": "sha512-zo8DsDpH7eTkPHCXFeAk1xZXJbyoTfdPlNR0bK7rpOMuhBYb0f5qUVCO1xlsitYd3w5FQTK7zpNVKb3rZoUrrQ==", - "dependencies": { - "pug-error": "^2.0.0" - } - }, - "node_modules/pug-walk": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-2.0.0.tgz", - "integrity": "sha512-yYELe9Q5q9IQhuvqsZNwA5hfPkMJ8u92bQLIMcsMxf/VADjNtEYptU+inlufAFYcWdHlwNfZOEnOOQrZrcyJCQ==" - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dependencies": { - "end-of-stream": "^1.1.0", - 
"once": "^1.3.1" - } - }, - "node_modules/pumpify": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", - "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", - "dependencies": { - "duplexify": "^3.6.0", - "inherits": "^2.0.3", - "pump": "^2.0.0" - } - }, - "node_modules/pumpify/node_modules/pump": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", - "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/pupa": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", - "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", - "dependencies": { - "escape-goat": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/q": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", - "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", - "engines": { - "node": ">=0.6.0", - "teleport": ">=0.2.0" - } - }, - "node_modules/qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/query-string": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": 
"sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", - "dependencies": { - "decode-uri-component": "^0.2.0", - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/querystring": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz", - "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg==", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/querystring-es3": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", - "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" - }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, - "node_modules/randomfill": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", - "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", - "dependencies": { - "randombytes": "^2.0.5", - "safe-buffer": "^5.1.0" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "engines": { - "node": ">= 0.6" - } - }, - 
"node_modules/raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", - "dependencies": { - "bytes": "3.1.0", - "http-errors": "1.7.2", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/readdirp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", - "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", - "dependencies": { - "graceful-fs": "^4.1.11", - "micromatch": "^3.1.10", - "readable-stream": "^2.0.2" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/reduce": { - "version": 
"1.0.2", - "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.2.tgz", - "integrity": "sha512-xX7Fxke/oHO5IfZSk77lvPa/7bjMh9BuCk4OOoX5XTXrM7s0Z+MkPfSDfz0q7r91BhhGSs8gii/VEN/7zhCPpQ==", - "dependencies": { - "object-keys": "^1.1.0" - } - }, - "node_modules/regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" - }, - "node_modules/regenerate-unicode-properties": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz", - "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==", - "dependencies": { - "regenerate": "^1.4.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/regenerator-runtime": { - "version": "0.13.7", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", - "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew==" - }, - "node_modules/regenerator-transform": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz", - "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, - "node_modules/regex-not": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", - "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", - "dependencies": { - "extend-shallow": "^3.0.2", - "safe-regex": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regex-not/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": 
"https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regex-not/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/regexp.prototype.flags": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", - "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/regexpu-core": { - "version": "4.7.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz", - "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==", - "dependencies": { - "regenerate": "^1.4.0", - "regenerate-unicode-properties": "^8.2.0", - "regjsgen": "^0.5.1", - "regjsparser": "^0.6.4", - "unicode-match-property-ecmascript": "^1.0.4", - "unicode-match-property-value-ecmascript": "^1.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/registry-auth-token": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", - "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/registry-url": { - "version": "5.1.0", - 
"resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "dependencies": { - "rc": "^1.2.8" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/regjsgen": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz", - "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A==" - }, - "node_modules/regjsparser": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.9.tgz", - "integrity": "sha512-ZqbNRz1SNjLAiYuwY0zoXW8Ne675IX5q+YHioAGbCw4X96Mjl2+dcX9B2ciaeyYjViDAfvIjFpQjJgLttTEERQ==", - "dependencies": { - "jsesc": "~0.5.0" - }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", - "bin": { - "jsesc": "bin/jsesc" - } - }, - "node_modules/relateurl": { - "version": "0.2.7", - "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", - "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/remove-trailing-separator": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", - "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" - }, - "node_modules/renderkid": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.5.tgz", - "integrity": "sha512-ccqoLg+HLOHq1vdfYNm4TBeaCDIi1FLt3wGojTDSvdewUv65oTmI3cnT2E4hRjl1gzKZIPK+KZrXzlUYKnR+vQ==", - "dependencies": { - "css-select": "^2.0.2", - "dom-converter": "^0.2", - "htmlparser2": "^3.10.1", - "lodash": "^4.17.20", - "strip-ansi": "^3.0.0" - } - }, - 
"node_modules/renderkid/node_modules/css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/renderkid/node_modules/css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/renderkid/node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/renderkid/node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/renderkid/node_modules/domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "node_modules/renderkid/node_modules/domhandler": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "dependencies": { - "domelementtype": "1" - } - }, - 
"node_modules/renderkid/node_modules/domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", - "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", - "dependencies": { - "domelementtype": "^1.3.1", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^3.1.1" - } - }, - "node_modules/renderkid/node_modules/htmlparser2/node_modules/entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - }, - "node_modules/renderkid/node_modules/nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/renderkid/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/repeat-element": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz", - "integrity": 
"sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", - "engines": { - "node": ">=0.10" - } - }, - "node_modules/request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/request/node_modules/qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": 
"sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" - }, - "node_modules/requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" - }, - "node_modules/resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", - "dependencies": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" - } - }, - "node_modules/resolve-cwd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz", - "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=", - "dependencies": { - "resolve-from": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-from": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", - "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=", - "engines": { - "node": ">=4" - } - }, - "node_modules/resolve-url": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", - "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=" - }, - "node_modules/responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "engines": { - "node": ">=0.12" - } - }, - "node_modules/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": 
"sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", - "engines": { - "node": ">= 4" - } - }, - "node_modules/rgb-regex": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz", - "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE=" - }, - "node_modules/rgba-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz", - "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM=" - }, - "node_modules/rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - } - }, - "node_modules/ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "dependencies": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, - "node_modules/run-queue": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz", - "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=", - "dependencies": { - "aproba": "^1.1.1" - } - }, - "node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "node_modules/safe-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", - "dependencies": { - "ret": "~0.1.10" - } - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": 
"sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/sax": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz", - "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" - }, - "node_modules/schema-utils": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", - "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", - "dependencies": { - "@types/json-schema": "^7.0.5", - "ajv": "^6.12.4", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 8.9.0" - } - }, - "node_modules/section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/select": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz", - "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=", - "optional": true - }, - "node_modules/select-hose": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", - "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=" - }, - "node_modules/selfsigned": { - "version": "1.10.8", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz", - "integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==", - "dependencies": { - "node-forge": "^0.10.0" - } - }, - "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": 
"sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "dependencies": { - "semver": "^6.3.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", - "dependencies": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "~1.7.2", - "mime": "1.6.0", - "ms": "2.1.1", - "on-finished": "~2.3.0", - "range-parser": "~1.2.1", - "statuses": "~1.5.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/send/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/send/node_modules/debug/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/send/node_modules/mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", - "bin": { - "mime": "cli.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/send/node_modules/ms": { - "version": "2.1.1", - "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" - }, - "node_modules/serialize-javascript": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz", - "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=", - "dependencies": { - "accepts": "~1.3.4", - "batch": "0.6.1", - "debug": "2.6.9", - "escape-html": "~1.0.3", - "http-errors": "~1.6.2", - "mime-types": "~2.1.17", - "parseurl": "~1.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/serve-index/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/serve-index/node_modules/http-errors": { - "version": "1.6.3", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/serve-index/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/serve-index/node_modules/setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": 
"sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" - }, - "node_modules/serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", - "dependencies": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.3", - "send": "0.17.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=" - }, - "node_modules/set-value": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", - "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", - "dependencies": { - "extend-shallow": "^2.0.1", - "is-extendable": "^0.1.1", - "is-plain-object": "^2.0.3", - "split-string": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/setimmediate": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", - "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=" - }, - "node_modules/setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" - }, - "node_modules/sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "dependencies": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - }, - "bin": { - "sha.js": "bin.js" - } - }, - "node_modules/shebang-command": { - "version": 
"1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dependencies": { - "shebang-regex": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "node_modules/simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "dependencies": { - "is-arrayish": "^0.3.1" - } - }, - "node_modules/simple-swizzle/node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - }, - "node_modules/sitemap": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz", - "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==", - "dependencies": { - "lodash.chunk": "^4.2.0", - "lodash.padstart": "^4.6.1", - "whatwg-url": "^7.0.0", - "xmlbuilder": "^13.0.0" - }, - "engines": { - "node": ">=6.0.0", - "npm": ">=4.0.0" - } - }, - "node_modules/slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "engines": { - "node": ">=6" - } - }, - 
"node_modules/smoothscroll-polyfill": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/smoothscroll-polyfill/-/smoothscroll-polyfill-0.4.4.tgz", - "integrity": "sha512-TK5ZA9U5RqCwMpfoMq/l1mrH0JAR7y7KRvOBx0n2869aLxch+gT9GhN3yUfjiw+d/DiF1mKo14+hd62JyMmoBg==" - }, - "node_modules/snapdragon": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", - "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", - "dependencies": { - "base": "^0.11.1", - "debug": "^2.2.0", - "define-property": "^0.2.5", - "extend-shallow": "^2.0.1", - "map-cache": "^0.2.2", - "source-map": "^0.5.6", - "source-map-resolve": "^0.5.0", - "use": "^3.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", - "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", - "dependencies": { - "define-property": "^1.0.0", - "isobject": "^3.0.0", - "snapdragon-util": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", - "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", - "dependencies": { - "is-descriptor": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/is-accessor-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", - "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/snapdragon-node/node_modules/is-data-descriptor": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", - "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", - "dependencies": { - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-node/node_modules/is-descriptor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", - "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", - "dependencies": { - "is-accessor-descriptor": "^1.0.0", - "is-data-descriptor": "^1.0.0", - "kind-of": "^6.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-util": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", - "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", - "dependencies": { - "kind-of": "^3.2.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon-util/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/snapdragon/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": 
"sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - "is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/snapdragon/node_modules/source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sockjs": { - "version": "0.3.21", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.21.tgz", - "integrity": "sha512-DhbPFGpxjc6Z3I+uX07Id5ZO2XwYsWOrYjaSeieES78cq+JaJvVe5q/m1uvjIQhXinhIeCFRH6JgXe+mvVMyXw==", - "dependencies": { - "faye-websocket": "^0.11.3", - "uuid": "^3.4.0", - "websocket-driver": "^0.7.4" - } - }, - "node_modules/sockjs-client": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.1.tgz", - "integrity": "sha512-VnVAb663fosipI/m6pqRXakEOw7nvd7TUgdr3PlR/8V2I95QIdwT8L4nMxhyU8SmDBHYXU1TOElaKOmKLfYzeQ==", - "dependencies": { - "debug": "^3.2.6", - "eventsource": "^1.0.7", - "faye-websocket": "^0.11.3", - "inherits": "^2.0.4", - "json3": "^3.3.3", - "url-parse": "^1.5.1" - } - }, - "node_modules/sockjs-client/node_modules/debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/sockjs-client/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/sort-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", - "dependencies": { - "is-plain-obj": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - 
"node_modules/source-list-map": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz", - "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==" - }, - "node_modules/source-map": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz", - "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/source-map-resolve": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", - "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", - "dependencies": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0", - "resolve-url": "^0.2.1", - "source-map-url": "^0.4.0", - "urix": "^0.1.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/source-map-support/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-url": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz", - "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==" - }, - "node_modules/spdy": { - "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", - "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", - "dependencies": { - "debug": "^4.1.0", - "handle-thing": "^2.0.0", - "http-deceiver": "^1.2.7", - "select-hose": "^2.0.0", - "spdy-transport": "^3.0.0" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/spdy-transport": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", - "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", - "dependencies": { - "debug": "^4.1.0", - "detect-node": "^2.0.4", - "hpack.js": "^2.1.6", - "obuf": "^1.1.2", - "readable-stream": "^3.0.6", - "wbuf": "^1.7.3" - } - }, - "node_modules/spdy-transport/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/spdy-transport/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/spdy-transport/node_modules/readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/spdy/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": 
"sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/spdy/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/split-string": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", - "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", - "dependencies": { - "extend-shallow": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/split-string/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/split-string/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "node_modules/sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - 
"bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ssri": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.2.tgz", - "integrity": "sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==", - "dependencies": { - "figgy-pudding": "^3.5.1" - } - }, - "node_modules/stable": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz", - "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" - }, - "node_modules/stack-utils": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.5.tgz", - "integrity": "sha512-KZiTzuV3CnSnSvgMRrARVCj+Ht7rMbauGDK0LdVFRGyenwdylpajAp4Q0i6SX8rEmbTpMMf6ryq2gb8pPq2WgQ==", - "dependencies": { - "escape-string-regexp": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/stack-utils/node_modules/escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "engines": { - "node": ">=8" - } - }, - "node_modules/static-extend": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", - "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", - "dependencies": { - "define-property": "^0.2.5", - "object-copy": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/static-extend/node_modules/define-property": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", - "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", - "dependencies": { - 
"is-descriptor": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/std-env": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.0.tgz", - "integrity": "sha512-4qT5B45+Kjef2Z6pE0BkskzsH0GO7GrND0wGlTM1ioUe3v0dGYx9ZJH0Aro/YyA8fqQ5EyIKDRjZojJYMFTflw==", - "dependencies": { - "ci-info": "^3.0.0" - } - }, - "node_modules/stream-browserify": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", - "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", - "dependencies": { - "inherits": "~2.0.1", - "readable-stream": "^2.0.2" - } - }, - "node_modules/stream-each": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz", - "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==", - "dependencies": { - "end-of-stream": "^1.1.0", - "stream-shift": "^1.0.0" - } - }, - "node_modules/stream-http": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", - "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", - "dependencies": { - "builtin-status-codes": "^3.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.3.6", - "to-arraybuffer": "^1.0.0", - "xtend": "^4.0.0" - } - }, - "node_modules/stream-shift": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", - "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" - }, - "node_modules/strict-uri-encode": { - "version": "1.1.0", - 
"resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dependencies": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/string.prototype.trimend": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz", - "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/string.prototype.trimstart": { - 
"version": "1.0.4", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz", - "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==", - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stylehacks": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz", - "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==", - "dependencies": { - "browserslist": "^4.0.0", - "postcss": "^7.0.0", - "postcss-selector-parser": "^3.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/stylehacks/node_modules/postcss-selector-parser": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", - "integrity": 
"sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", - "dependencies": { - "dot-prop": "^5.2.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/stylus": { - "version": "0.54.8", - "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz", - "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==", - "dependencies": { - "css-parse": "~2.0.0", - "debug": "~3.1.0", - "glob": "^7.1.6", - "mkdirp": "~1.0.4", - "safer-buffer": "^2.1.2", - "sax": "~1.2.4", - "semver": "^6.3.0", - "source-map": "^0.7.3" - }, - "bin": { - "stylus": "bin/stylus" - }, - "engines": { - "node": "*" - } - }, - "node_modules/stylus-loader": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/stylus-loader/-/stylus-loader-3.0.2.tgz", - "integrity": "sha512-+VomPdZ6a0razP+zinir61yZgpw2NfljeSsdUF5kJuEzlo3khXhY19Fn6l8QQz1GRJGtMCo8nG5C04ePyV7SUA==", - "dependencies": { - "loader-utils": "^1.0.2", - "lodash.clonedeep": "^4.5.0", - "when": "~3.6.x" - } - }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/svg-tags": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", - "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=" - }, - "node_modules/svgo": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz", - "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==", - "dependencies": { - "chalk": "^2.4.1", - "coa": "^2.0.2", - "css-select": "^2.0.0", - "css-select-base-adapter": "^0.1.1", - 
"css-tree": "1.0.0-alpha.37", - "csso": "^4.0.2", - "js-yaml": "^3.13.1", - "mkdirp": "~0.5.1", - "object.values": "^1.1.0", - "sax": "~1.2.4", - "stable": "^0.1.8", - "unquote": "~1.1.1", - "util.promisify": "~1.0.0" - }, - "bin": { - "svgo": "bin/svgo" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/svgo/node_modules/css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "node_modules/svgo/node_modules/css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/svgo/node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - } - }, - "node_modules/svgo/node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - }, - "node_modules/svgo/node_modules/domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "node_modules/svgo/node_modules/domutils": { - "version": 
"1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "node_modules/svgo/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/svgo/node_modules/nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "dependencies": { - "boolbase": "~1.0.0" - } - }, - "node_modules/tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/term-size": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz", - "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/terser": { - "version": "4.8.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", - "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", - "dependencies": { - "commander": "^2.20.0", - "source-map": "~0.6.1", - "source-map-support": "~0.5.12" - }, - "bin": { - "terser": "bin/terser" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/terser-webpack-plugin": { - "version": "1.4.5", - "resolved": 
"https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz", - "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==", - "dependencies": { - "cacache": "^12.0.2", - "find-cache-dir": "^2.1.0", - "is-wsl": "^1.1.0", - "schema-utils": "^1.0.0", - "serialize-javascript": "^4.0.0", - "source-map": "^0.6.1", - "terser": "^4.1.2", - "webpack-sources": "^1.4.0", - "worker-farm": "^1.7.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/terser-webpack-plugin/node_modules/find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dependencies": { - 
"pify": "^4.0.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/terser-webpack-plugin/node_modules/pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dependencies": { - "find-up": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/terser-webpack-plugin/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/terser-webpack-plugin/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/terser/node_modules/commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, - "node_modules/terser/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" - }, - "node_modules/through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" - }, - "node_modules/through2": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", - "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", - "dependencies": { - "readable-stream": "~2.3.6", - "xtend": "~4.0.1" - } - }, - "node_modules/thunky": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", - "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" - }, - "node_modules/timers-browserify": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", - "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", - "dependencies": { - "setimmediate": "^1.0.4" - }, - "engines": { - "node": ">=0.6.0" - 
} - }, - "node_modules/timsort": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz", - "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=" - }, - "node_modules/tiny-cookie": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz", - "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg==" - }, - "node_modules/tiny-emitter": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", - "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", - "optional": true - }, - "node_modules/to-arraybuffer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", - "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=" - }, - "node_modules/to-factory": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-factory/-/to-factory-1.0.0.tgz", - "integrity": "sha1-hzivi9lxIK0dQEeXKtpVY7+UebE=" - }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "engines": { - "node": ">=4" - } - }, - "node_modules/to-object-path": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", - "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", - "dependencies": { - "kind-of": "^3.0.2" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-object-path/node_modules/kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dependencies": { - "is-buffer": "^1.1.5" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - 
"resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "engines": { - "node": ">=6" - } - }, - "node_modules/to-regex": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", - "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", - "dependencies": { - "define-property": "^2.0.2", - "extend-shallow": "^3.0.2", - "regex-not": "^1.0.2", - "safe-regex": "^1.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex-range": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", - "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", - "dependencies": { - "is-number": "^3.0.0", - "repeat-string": "^1.6.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex/node_modules/extend-shallow": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", - "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", - "dependencies": { - "assign-symbols": "^1.0.0", - "is-extendable": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/to-regex/node_modules/is-extendable": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", - "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", - "dependencies": { - "is-plain-object": "^2.0.4" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==", - "engines": { - "node": ">=0.6" - } - }, - 
"node_modules/token-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-1.0.0.tgz", - "integrity": "sha1-zCAOqyYT9BZtJ/+a/HylbUnfbrQ=" - }, - "node_modules/toml": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz", - "integrity": "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==" - }, - "node_modules/toposort": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/toposort/-/toposort-1.0.7.tgz", - "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk=" - }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tr46": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", - "integrity": "sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/tty-browserify": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", - "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=" - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "node_modules/type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": 
"sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "engines": { - "node": ">=10" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" - }, - "node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "dependencies": { - "is-typedarray": "^1.0.0" - } - }, - "node_modules/uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" - }, - "node_modules/uglify-js": { - "version": "3.4.10", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.4.10.tgz", - "integrity": "sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw==", - "dependencies": { - "commander": "~2.19.0", - "source-map": "~0.6.1" - }, - "bin": { - "uglifyjs": "bin/uglifyjs" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/uglify-js/node_modules/commander": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.19.0.tgz", - "integrity": "sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg==" - }, - "node_modules/uglify-js/node_modules/source-map": { - 
"version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unbox-primitive": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz", - "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==", - "dependencies": { - "function-bind": "^1.1.1", - "has-bigints": "^1.0.1", - "has-symbols": "^1.0.2", - "which-boxed-primitive": "^1.0.2" - } - }, - "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz", - "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz", - "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==", - "dependencies": { - "unicode-canonical-property-names-ecmascript": "^1.0.4", - "unicode-property-aliases-ecmascript": "^1.0.4" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-match-property-value-ecmascript": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz", - "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==", - "engines": { - "node": ">=4" - } - }, - "node_modules/unicode-property-aliases-ecmascript": { - "version": "1.1.0", - 
"resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz", - "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==", - "engines": { - "node": ">=4" - } - }, - "node_modules/union-value": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", - "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", - "dependencies": { - "arr-union": "^3.1.0", - "get-value": "^2.0.6", - "is-extendable": "^0.1.1", - "set-value": "^2.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/uniq": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", - "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=" - }, - "node_modules/uniqs": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz", - "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI=" - }, - "node_modules/unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "dependencies": { - "unique-slug": "^2.0.0" - } - }, - "node_modules/unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "dependencies": { - "imurmurhash": "^0.1.4" - } - }, - "node_modules/unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "dependencies": { - "crypto-random-string": "^2.0.0" - }, - "engines": { - "node": 
">=8" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/unquote": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz", - "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ=" - }, - "node_modules/unset-value": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", - "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", - "dependencies": { - "has-value": "^0.3.1", - "isobject": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-value": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", - "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", - "dependencies": { - "get-value": "^2.0.3", - "has-values": "^0.1.4", - "isobject": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-value/node_modules/isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dependencies": { - "isarray": "1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/unset-value/node_modules/has-values": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", - "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/upath": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", - "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", - "engines": { - "node": ">=4", - "yarn": "*" - } - }, - "node_modules/update-notifier": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz", - "integrity": "sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==", - "dependencies": { - "boxen": "^4.2.0", - "chalk": "^3.0.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.3.1", - "is-npm": "^4.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.0.0", - "pupa": "^2.0.1", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" 
- } - }, - "node_modules/update-notifier/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/update-notifier/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/update-notifier/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/upper-case": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-1.1.3.tgz", - "integrity": "sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg=" - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/urix": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", - "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=" - }, - "node_modules/url": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", - "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", - "dependencies": { - "punycode": "1.3.2", - "querystring": "0.2.0" - } - }, - "node_modules/url-loader": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-1.1.2.tgz", - "integrity": 
"sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg==", - "dependencies": { - "loader-utils": "^1.1.0", - "mime": "^2.0.3", - "schema-utils": "^1.0.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/url-loader/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", - "dependencies": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/url/node_modules/punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=" - }, - "node_modules/url/node_modules/querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/use": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", - "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/util": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz", - "integrity": "sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==", - "dependencies": { - "inherits": "2.0.3" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "node_modules/util.promisify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz", - "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==", - "dependencies": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.2", - "has-symbols": "^1.0.1", - "object.getownpropertydescriptors": "^2.1.0" - } - }, - "node_modules/util/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "node_modules/utila": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", - "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw=" - }, - "node_modules/utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "bin": { - "uuid": "bin/uuid" - } - }, - "node_modules/v-runtime-template": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/v-runtime-template/-/v-runtime-template-1.10.0.tgz", - "integrity": 
"sha512-WLlq9jUepSfUrMEenw3mn7FDXX6hhbl11JjC1OKhwLzifHzVrY5a696TUHDPyj9jke3GGnR7b+2T3od/RL5cww==" - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vendors": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz", - "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==" - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/vm-browserify": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", - "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==" - }, - "node_modules/void-elements": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", - "integrity": "sha1-YU9/v42AHwu18GYfWy9XhXUOTwk=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz", - "integrity": "sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg==" - }, - "node_modules/vue-hot-reload-api": { - "version": "2.3.4", - "resolved": "https://registry.npmjs.org/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz", - "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" - }, - "node_modules/vue-loader": { - "version": "15.9.6", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.6.tgz", 
- "integrity": "sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==", - "dependencies": { - "@vue/component-compiler-utils": "^3.1.0", - "hash-sum": "^1.0.2", - "loader-utils": "^1.1.0", - "vue-hot-reload-api": "^2.3.0", - "vue-style-loader": "^4.1.0" - } - }, - "node_modules/vue-router": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.5.1.tgz", - "integrity": "sha512-RRQNLT8Mzr8z7eL4p7BtKvRaTSGdCbTy2+Mm5HTJvLGYSSeG9gDzNasJPP/yOYKLy+/cLG/ftrqq5fvkFwBJEw==" - }, - "node_modules/vue-server-renderer": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz", - "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==", - "dependencies": { - "chalk": "^1.1.3", - "hash-sum": "^1.0.2", - "he": "^1.1.0", - "lodash.template": "^4.5.0", - "lodash.uniq": "^4.5.0", - "resolve": "^1.2.0", - "serialize-javascript": "^3.1.0", - "source-map": "0.5.6" - } - }, - "node_modules/vue-server-renderer/node_modules/ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dependencies": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/serialize-javascript": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz", - "integrity": 
"sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==", - "dependencies": { - "randombytes": "^2.1.0" - } - }, - "node_modules/vue-server-renderer/node_modules/source-map": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz", - "integrity": "sha1-dc449SvwczxafwwRjYEzSiu19BI=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/vue-server-renderer/node_modules/supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/vue-style-loader": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.3.tgz", - "integrity": "sha512-sFuh0xfbtpRlKfm39ss/ikqs9AbKCoXZBpHeVZ8Tx650o0k0q/YCM7FRvigtxpACezfq6af+a7JeqVTWvncqDg==", - "dependencies": { - "hash-sum": "^1.0.2", - "loader-utils": "^1.0.2" - } - }, - "node_modules/vue-template-compiler": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz", - "integrity": "sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==", - "dependencies": { - "de-indent": "^1.0.2", - "he": "^1.1.0" - } - }, - "node_modules/vue-template-es2015-compiler": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz", - "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" - }, - "node_modules/vuepress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.8.2.tgz", - "integrity": "sha512-BU1lUDwsA3ghf7a9ga4dsf0iTc++Z/l7BR1kUagHWVBHw7HNRgRDfAZBDDQXhllMILVToIxaTifpne9mSi94OA==", - "hasInstallScript": true, - "dependencies": { - "@vuepress/core": "1.8.2", 
- "@vuepress/theme-default": "1.8.2", - "cac": "^6.5.6", - "envinfo": "^7.2.0", - "opencollective-postinstall": "^2.0.2", - "update-notifier": "^4.0.0" - }, - "bin": { - "vuepress": "cli.js" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/vuepress-html-webpack-plugin": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/vuepress-html-webpack-plugin/-/vuepress-html-webpack-plugin-3.2.0.tgz", - "integrity": "sha512-BebAEl1BmWlro3+VyDhIOCY6Gef2MCBllEVAP3NUAtMguiyOwo/dClbwJ167WYmcxHJKLl7b0Chr9H7fpn1d0A==", - "dependencies": { - "html-minifier": "^3.2.3", - "loader-utils": "^0.2.16", - "lodash": "^4.17.3", - "pretty-error": "^2.0.2", - "tapable": "^1.0.0", - "toposort": "^1.0.0", - "util.promisify": "1.0.0" - }, - "engines": { - "node": ">=6.9" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/big.js": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", - "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==", - "engines": { - "node": "*" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/emojis-list": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", - "bin": { - "json5": "lib/cli.js" - } - }, - "node_modules/vuepress-html-webpack-plugin/node_modules/loader-utils": { - "version": "0.2.17", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz", - "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=", - "dependencies": { - "big.js": "^3.1.3", - "emojis-list": "^2.0.0", - "json5": "^0.5.0", - "object-assign": "^4.0.1" - } - }, - 
"node_modules/vuepress-html-webpack-plugin/node_modules/util.promisify": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz", - "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==", - "dependencies": { - "define-properties": "^1.1.2", - "object.getownpropertydescriptors": "^2.0.3" - } - }, - "node_modules/vuepress-plugin-container": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.5.tgz", - "integrity": "sha512-TQrDX/v+WHOihj3jpilVnjXu9RcTm6m8tzljNJwYhxnJUW0WWQ0hFLcDTqTBwgKIFdEiSxVOmYE+bJX/sq46MA==", - "dependencies": { - "@vuepress/shared-utils": "^1.2.0", - "markdown-it-container": "^2.0.0" - } - }, - "node_modules/vuepress-plugin-google-tag-manager": { - "version": "0.0.5", - "resolved": "https://registry.npmjs.org/vuepress-plugin-google-tag-manager/-/vuepress-plugin-google-tag-manager-0.0.5.tgz", - "integrity": "sha512-Hm1GNDdNmc4Vs9c3OMfTtHicB/oZWNCmzMFPdlOObVN1OjizIjImdm+LZIwiVKVndT2TQ4BPhMx7HQkovmD2Lg==" - }, - "node_modules/vuepress-plugin-sitemap": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/vuepress-plugin-sitemap/-/vuepress-plugin-sitemap-2.3.1.tgz", - "integrity": "sha512-n+8lbukhrKrsI9H/EX0EBgkE1pn85LAQFvQ5dIvrZP4Kz6JxPOPPNTQmZMhahQV1tXbLZQCEN7A1WZH4x+arJQ==", - "dependencies": { - "sitemap": "^3.0.0" - }, - "bin": { - "vuepress-sitemap": "cli.js" - } - }, - "node_modules/vuepress-plugin-smooth-scroll": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/vuepress-plugin-smooth-scroll/-/vuepress-plugin-smooth-scroll-0.0.3.tgz", - "integrity": "sha512-qsQkDftLVFLe8BiviIHaLV0Ea38YLZKKonDGsNQy1IE0wllFpFIEldWD8frWZtDFdx6b/O3KDMgVQ0qp5NjJCg==", - "dependencies": { - "smoothscroll-polyfill": "^0.4.3" - } - }, - "node_modules/vuepress-theme-cosmos": { - "version": "1.0.182", - "resolved": 
"https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.182.tgz", - "integrity": "sha512-Mc1ZOsSqLGgbB9xEXsx5QkHUBkKXOoDgkjrp5iX+fwmM4TCmR4MWbTlKpEzfzsxZ1DuixtwVkv0MT+eNvD2Lfw==", - "dependencies": { - "@cosmos-ui/vue": "^0.35.0", - "@vuepress/plugin-google-analytics": "1.7.1", - "algoliasearch": "^4.2.0", - "axios": "^0.21.0", - "cheerio": "^1.0.0-rc.3", - "clipboard-copy": "^3.1.0", - "entities": "2.1.0", - "esm": "^3.2.25", - "gray-matter": "^4.0.2", - "hotkeys-js": "3.8.1", - "jsonp": "^0.2.1", - "markdown-it": "^12.0.0", - "markdown-it-attrs": "^3.0.3", - "prismjs": "^1.22.0", - "pug": "^3.0.1", - "pug-plain-loader": "^1.0.0", - "stylus": "^0.54.8", - "stylus-loader": "^3.0.2", - "tiny-cookie": "^2.3.2", - "v-runtime-template": "^1.10.0", - "vuepress": "^1.5.4", - "vuepress-plugin-google-tag-manager": "0.0.5", - "vuepress-plugin-sitemap": "^2.3.1" - } - }, - "node_modules/watchpack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.2.0.tgz", - "integrity": "sha512-up4YAn/XHgZHIxFBVCdlMiWDj6WaLKpwVeGQk2I5thdYxF/KmF0aaz6TfJZ/hfl1h/XlcDr7k1KH7ThDagpFaA==", - "dev": true, - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/watchpack-chokidar2": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz", - "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==", - "optional": true, - "dependencies": { - "chokidar": "^2.1.8" - } - }, - "node_modules/watchpack/node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", - "dev": true - }, - "node_modules/wbuf": { - "version": "1.7.3", - "resolved": 
"https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", - "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", - "dependencies": { - "minimalistic-assert": "^1.0.0" - } - }, - "node_modules/webidl-conversions": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", - "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" - }, - "node_modules/webpack": { - "version": "4.46.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.46.0.tgz", - "integrity": "sha512-6jJuJjg8znb/xRItk7bkT0+Q7AHCYjjFnvKIWQPkNIOyRqoCGvkOs0ipeQzrqz4l5FtN5ZI/ukEHroeX/o1/5Q==", - "dependencies": { - "@webassemblyjs/ast": "1.9.0", - "@webassemblyjs/helper-module-context": "1.9.0", - "@webassemblyjs/wasm-edit": "1.9.0", - "@webassemblyjs/wasm-parser": "1.9.0", - "acorn": "^6.4.1", - "ajv": "^6.10.2", - "ajv-keywords": "^3.4.1", - "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^4.5.0", - "eslint-scope": "^4.0.3", - "json-parse-better-errors": "^1.0.2", - "loader-runner": "^2.4.0", - "loader-utils": "^1.2.3", - "memory-fs": "^0.4.1", - "micromatch": "^3.1.10", - "mkdirp": "^0.5.3", - "neo-async": "^2.6.1", - "node-libs-browser": "^2.2.1", - "schema-utils": "^1.0.0", - "tapable": "^1.1.3", - "terser-webpack-plugin": "^1.4.3", - "watchpack": "^1.7.4", - "webpack-sources": "^1.4.1" - }, - "bin": { - "webpack": "bin/webpack.js" - }, - "engines": { - "node": ">=6.11.5" - } - }, - "node_modules/webpack-chain": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.5.1.tgz", - "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==", - "dependencies": { - "deepmerge": "^1.5.2", - "javascript-stringify": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack-chain/node_modules/javascript-stringify": 
{ - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.0.1.tgz", - "integrity": "sha512-yV+gqbd5vaOYjqlbk16EG89xB5udgjqQF3C5FAORDg4f/IS1Yc5ERCv5e/57yBcfJYw05V5JyIXabhwb75Xxow==" - }, - "node_modules/webpack-dev-middleware": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.7.3.tgz", - "integrity": "sha512-djelc/zGiz9nZj/U7PTBi2ViorGJXEWo/3ltkPbDyxCXhhEXkW0ce99falaok4TPj+AsxLiXJR0EBOb0zh9fKQ==", - "dependencies": { - "memory-fs": "^0.4.1", - "mime": "^2.4.4", - "mkdirp": "^0.5.1", - "range-parser": "^1.2.1", - "webpack-log": "^2.0.0" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack-dev-middleware/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/webpack-dev-server": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.2.tgz", - "integrity": "sha512-A80BkuHRQfCiNtGBS1EMf2ChTUs0x+B3wGDFmOeT4rmJOHhHTCH2naNxIHhmkr0/UillP4U3yeIyv1pNp+QDLQ==", - "dependencies": { - "ansi-html": "0.0.7", - "bonjour": "^3.5.0", - "chokidar": "^2.1.8", - "compression": "^1.7.4", - "connect-history-api-fallback": "^1.6.0", - "debug": "^4.1.1", - "del": "^4.1.1", - "express": "^4.17.1", - "html-entities": "^1.3.1", - "http-proxy-middleware": "0.19.1", - "import-local": "^2.0.0", - "internal-ip": "^4.3.0", - "ip": "^1.1.5", - "is-absolute-url": "^3.0.3", - "killable": "^1.0.1", - "loglevel": "^1.6.8", - "opn": "^5.5.0", - "p-retry": "^3.0.1", - "portfinder": "^1.0.26", - "schema-utils": "^1.0.0", - "selfsigned": "^1.10.8", - "semver": "^6.3.0", - "serve-index": "^1.9.1", - "sockjs": "^0.3.21", - "sockjs-client": 
"^1.5.0", - "spdy": "^4.0.2", - "strip-ansi": "^3.0.1", - "supports-color": "^6.1.0", - "url": "^0.11.0", - "webpack-dev-middleware": "^3.7.2", - "webpack-log": "^2.0.0", - "ws": "^6.2.1", - "yargs": "^13.3.2" - }, - "bin": { - "webpack-dev-server": "bin/webpack-dev-server.js" - }, - "engines": { - "node": ">= 6.11.5" - } - }, - "node_modules/webpack-dev-server/node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/webpack-dev-server/node_modules/is-absolute-url": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", - "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack-dev-server/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/webpack-dev-server/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/webpack-dev-server/node_modules/supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dependencies": { - 
"has-flag": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/webpack-log": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/webpack-log/-/webpack-log-2.0.0.tgz", - "integrity": "sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg==", - "dependencies": { - "ansi-colors": "^3.0.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack-merge": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-4.2.2.tgz", - "integrity": "sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g==", - "dependencies": { - "lodash": "^4.17.15" - } - }, - "node_modules/webpack-sources": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz", - "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==", - "dependencies": { - "source-list-map": "^2.0.0", - "source-map": "~0.6.1" - } - }, - "node_modules/webpack-sources/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/webpack/node_modules/acorn": { - "version": "6.4.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", - "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/webpack/node_modules/anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - 
"optional": true, - "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/webpack/node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "optional": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "optional": true, - "dependencies": { - "fill-range": "^7.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", - "optional": true, - "dependencies": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "fsevents": "~2.3.1", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" - }, - "engines": { - "node": ">= 8.10.0" - } - }, - "node_modules/webpack/node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "optional": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": 
"sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/webpack/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "optional": true, - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/webpack/node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "optional": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/webpack/node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "optional": true, - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/webpack/node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/webpack/node_modules/readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", - "optional": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - 
"engines": { - "node": ">=8.10.0" - } - }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", - "dependencies": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" - }, - "engines": { - "node": ">= 4" - } - }, - "node_modules/webpack/node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "optional": true, - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/webpack/node_modules/watchpack": { - "version": "1.7.5", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz", - "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==", - "dependencies": { - "chokidar": "^3.4.1", - "graceful-fs": "^4.1.2", - "neo-async": "^2.5.0", - "watchpack-chokidar2": "^2.0.1" - }, - "optionalDependencies": { - "chokidar": "^3.4.1", - "watchpack-chokidar2": "^2.0.1" - } - }, - "node_modules/webpackbar": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-3.2.0.tgz", - "integrity": "sha512-PC4o+1c8gWWileUfwabe0gqptlXUDJd5E0zbpr2xHP1VSOVlZVPBZ8j6NCR8zM5zbKdxPhctHXahgpNK1qFDPw==", - "dependencies": { - "ansi-escapes": "^4.1.0", - "chalk": "^2.4.1", - "consola": "^2.6.0", - "figures": "^3.0.0", - "pretty-time": "^1.1.0", - "std-env": "^2.2.1", - "text-table": "^0.2.0", - "wrap-ansi": "^5.1.0" - }, - "engines": { - "node": ">= 6.9.0" - } - }, - "node_modules/websocket-driver": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", - 
"integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", - "dependencies": { - "http-parser-js": ">=0.5.1", - "safe-buffer": ">=5.1.0", - "websocket-extensions": ">=0.1.1" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/websocket-extensions": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", - "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/whatwg-url": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", - "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", - "dependencies": { - "lodash.sortby": "^4.7.0", - "tr46": "^1.0.1", - "webidl-conversions": "^4.0.2" - } - }, - "node_modules/when": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/when/-/when-3.6.4.tgz", - "integrity": "sha1-RztRfsFZ4rhQBUl6E5g/CVQS404=" - }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, - "node_modules/which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dependencies": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "node_modules/which-module": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=" - }, - "node_modules/widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "dependencies": { - "string-width": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/widest-line/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/widest-line/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - 
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/with": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/with/-/with-7.0.2.tgz", - "integrity": "sha512-RNGKj82nUPg3g5ygxkQl0R937xLyho1J24ItRCBTr/m1YnZkzJy1hUiHUJrc/VlsDQzsCnInEGSg3bci0Lmd4w==", - "dependencies": { - "@babel/parser": "^7.9.6", - "@babel/types": "^7.9.6", - "assert-never": "^1.2.1", - "babel-walk": "3.0.0-canary-5" - }, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/worker-farm": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz", - "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==", - "dependencies": { - "errno": "~0.1.7" - } - }, - "node_modules/wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dependencies": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dependencies": { - "ansi-regex": "^4.1.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/wrappy": { - 
"version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "node_modules/write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dependencies": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "node_modules/ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", - "dependencies": { - "async-limiter": "~1.0.0" - } - }, - "node_modules/xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/xmlbuilder": { - "version": "13.0.2", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz", - "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==", - "engines": { - "node": ">=6.0" - } - }, - "node_modules/xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "engines": { - "node": ">=0.4" - } - }, - "node_modules/y18n": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", - "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==" - }, - "node_modules/yallist": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "node_modules/yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dependencies": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - } - }, - "node_modules/yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dependencies": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - }, - "node_modules/yargs-parser/node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - 
"p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/yargs/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "engines": { - "node": ">=4" - } - }, - "node_modules/zepto": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/zepto/-/zepto-1.2.0.tgz", - "integrity": "sha1-4Se9nmb9hGvl6rSME5SIL3wOT5g=" - } - }, "dependencies": { "@algolia/cache-browser-local-storage": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.8.6.tgz", - "integrity": "sha512-Bam7otzjIEgrRXWmk0Amm1+B3ROI5dQnUfJEBjIy0YPM0kMahEoJXCw6160tGKxJLl1g6icoC953nGshQKO7cA==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.12.0.tgz", + "integrity": "sha512-l+G560B6N1k0rIcOjTO1yCzFUbg2Zy2HCii9s03e13jGgqduVQmk79UUCYszjsJ5GPJpUEKcVEtAIpP7tjsXVA==", "requires": { - "@algolia/cache-common": "4.8.6" + "@algolia/cache-common": "4.12.0" } }, "@algolia/cache-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.8.6.tgz", - "integrity": "sha512-eGQlsXU5G7n4RvV/K6qe6lRAeL6EKAYPT3yZDBjCW4pAh7JWta+77a7BwUQkTqXN1MEQWZXjex3E4z/vFpzNrg==" + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.12.0.tgz", + "integrity": 
"sha512-2Z8BV+NX7oN7RmmQbLqmW8lfN9aAjOexX1FJjzB0YfKC9ifpi9Jl4nSxlnbU+iLR6QhHo0IfuyQ7wcnucCGCGQ==" }, "@algolia/cache-in-memory": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.8.6.tgz", - "integrity": "sha512-kbJrvCFANxL/l5Pq1NFyHLRphKDwmqcD/OJga0IbNKEulRGDPkt1+pC7/q8d2ikP12adBjLLg2CVias9RJpIaw==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.12.0.tgz", + "integrity": "sha512-b6ANkZF6vGAo+sYv6g25W5a0u3o6F549gEAgtTDTVA1aHcdWwe/HG/dTJ7NsnHbuR+A831tIwnNYQjRp3/V/Jw==", "requires": { - "@algolia/cache-common": "4.8.6" + "@algolia/cache-common": "4.12.0" } }, "@algolia/client-account": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.8.6.tgz", - "integrity": "sha512-FQVJE/BgCb78jtG7V0r30sMl9P5JKsrsOacGtGF2YebqI0YF25y8Z1nO39lbdjahxUS3QkDw2d0P2EVMj65g2Q==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.12.0.tgz", + "integrity": "sha512-gzXN75ZydNheNXUN3epS+aLsKnB/PHFVlGUUjXL8WHs4lJP3B5FtHvaA/NCN5DsM3aamhuY5p0ff1XIA+Lbcrw==", "requires": { - "@algolia/client-common": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/transporter": "4.8.6" + "@algolia/client-common": "4.12.0", + "@algolia/client-search": "4.12.0", + "@algolia/transporter": "4.12.0" } }, "@algolia/client-analytics": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.8.6.tgz", - "integrity": "sha512-ZBYFUlzNaWDFtt0rYHI7xbfVX0lPWU9lcEEXI/BlnkRgEkm247H503tNatPQFA1YGkob52EU18sV1eJ+OFRBLA==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.12.0.tgz", + "integrity": "sha512-rO2cZCt00Opk66QBZb7IBGfCq4ZE3EiuGkXssf2Monb5urujy0r8CknK2i7bzaKtPbd2vlvhmLP4CEHQqF6SLQ==", "requires": { - "@algolia/client-common": "4.8.6", - 
"@algolia/client-search": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" + "@algolia/client-common": "4.12.0", + "@algolia/client-search": "4.12.0", + "@algolia/requester-common": "4.12.0", + "@algolia/transporter": "4.12.0" } }, "@algolia/client-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.8.6.tgz", - "integrity": "sha512-8dI+K3Nvbes2YRZm2LY7bdCUD05e60BhacrMLxFuKxnBGuNehME1wbxq/QxcG1iNFJlxLIze5TxIcNN3+pn76g==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.12.0.tgz", + "integrity": "sha512-fcrFN7FBmxiSyjeu3sF4OnPkC1l7/8oyQ8RMM8CHpVY8cad6/ay35MrfRfgfqdzdFA8LzcBYO7fykuJv0eOqxw==", "requires": { - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" + "@algolia/requester-common": "4.12.0", + "@algolia/transporter": "4.12.0" } }, - "@algolia/client-recommendation": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.8.6.tgz", - "integrity": "sha512-Kg8DpjwvaWWujNx6sAUrSL+NTHxFe/UNaliCcSKaMhd3+FiPXN+CrSkO0KWR7I+oK2qGBTG/2Y0BhFOJ5/B/RA==", + "@algolia/client-personalization": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.12.0.tgz", + "integrity": "sha512-wCJfSQEmX6ZOuJBJGjy+sbXiW0iy7tMNAhsVMV9RRaJE4727e5WAqwFWZssD877WQ74+/nF/VyTaB1+wejo33Q==", "requires": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" + "@algolia/client-common": "4.12.0", + "@algolia/requester-common": "4.12.0", + "@algolia/transporter": "4.12.0" } }, "@algolia/client-search": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.8.6.tgz", - "integrity": "sha512-vXLS6umL/9G3bwqc6pkrS9K5/s8coq55mpfRARL+bs0NsToOf77WSTdwzlxv/KdbVF7dHjXgUpBvJ6RyR4ZdAw==", + 
"version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.12.0.tgz", + "integrity": "sha512-ik6dswcTQtOdZN+8aKntI9X2E6Qpqjtyda/+VANiHThY9GD2PBXuNuuC2HvlF26AbBYp5xaSE/EKxn1DIiIJ4Q==", "requires": { - "@algolia/client-common": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/transporter": "4.8.6" + "@algolia/client-common": "4.12.0", + "@algolia/requester-common": "4.12.0", + "@algolia/transporter": "4.12.0" } }, "@algolia/logger-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.8.6.tgz", - "integrity": "sha512-FMRxZGdDxSzd0/Mv0R1021FvUt0CcbsQLYeyckvSWX8w+Uk4o0lcV6UtZdERVR5XZsGOqoXLMIYDbR2vkbGbVw==" + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.12.0.tgz", + "integrity": "sha512-V//9rzLdJujA3iZ/tPhmKR/m2kjSZrymxOfUiF3024u2/7UyOpH92OOCrHUf023uMGYHRzyhBz5ESfL1oCdh7g==" }, "@algolia/logger-console": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.8.6.tgz", - "integrity": "sha512-TYw9lwUCjvApC6Z0zn36T6gkCl7hbfJmnU+Z/D8pFJ3Yp7lz06S3oWGjbdrULrYP1w1VOhjd0X7/yGNsMhzutQ==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.12.0.tgz", + "integrity": "sha512-pHvoGv53KXRIJHLk9uxBwKirwEo12G9+uo0sJLWESThAN3v5M+ycliU1AkUXQN8+9rds2KxfULAb+vfyfBKf8A==", "requires": { - "@algolia/logger-common": "4.8.6" + "@algolia/logger-common": "4.12.0" } }, "@algolia/requester-browser-xhr": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.8.6.tgz", - "integrity": "sha512-omh6uJ3CJXOmcrU9M3/KfGg8XkUuGJGIMkqEbkFvIebpBJxfs6TVs0ziNeMFAcAfhi8/CGgpLbDSgJtWdGQa6w==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.12.0.tgz", + "integrity": 
"sha512-rGlHNMM3jIZBwSpz33CVkeXHilzuzHuFXEEW1icP/k3KW7kwBrKFJwBy42RzAJa5BYlLsTCFTS3xkPhYwTQKLg==", "requires": { - "@algolia/requester-common": "4.8.6" + "@algolia/requester-common": "4.12.0" } }, "@algolia/requester-common": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.8.6.tgz", - "integrity": "sha512-r5xJqq/D9KACkI5DgRbrysVL5DUUagikpciH0k0zjBbm+cXiYfpmdflo/h6JnY6kmvWgjr/4DoeTjKYb/0deAQ==" + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.12.0.tgz", + "integrity": "sha512-qgfdc73nXqpVyOMr6CMTx3nXvud9dP6GcMGDqPct+fnxogGcJsp24cY2nMqUrAfgmTJe9Nmy7Lddv0FyHjONMg==" }, "@algolia/requester-node-http": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.8.6.tgz", - "integrity": "sha512-TB36OqTVOKyHCOtdxhn/IJyI/NXi/BWy8IEbsiWwwZWlL79NWHbetj49jXWFolEYEuu8PgDjjZGpRhypSuO9XQ==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.12.0.tgz", + "integrity": "sha512-mOTRGf/v/dXshBoZKNhMG00ZGxoUH9QdSpuMKYnuWwIgstN24uj3DQx+Ho3c+uq0TYfq7n2v71uoJWuiW32NMQ==", "requires": { - "@algolia/requester-common": "4.8.6" + "@algolia/requester-common": "4.12.0" } }, "@algolia/transporter": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.8.6.tgz", - "integrity": "sha512-NRb31J0TP7EPoVMpXZ4yAtr61d26R8KGaf6qdULknvq5sOVHuuH4PwmF08386ERfIsgnM/OBhl+uzwACdCIjSg==", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.12.0.tgz", + "integrity": "sha512-MOQVHZ4BcBpf3LtOY/3fqXHAcvI8MahrXDHk9QrBE/iGensQhDiZby5Dn3o2JN/zd9FMnVbdPQ8gnkiMwZiakQ==", "requires": { - "@algolia/cache-common": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/requester-common": "4.8.6" + "@algolia/cache-common": "4.12.0", + "@algolia/logger-common": "4.12.0", + 
"@algolia/requester-common": "4.12.0" } }, "@babel/code-frame": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz", - "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz", + "integrity": "sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==", "requires": { - "@babel/highlight": "^7.12.13" + "@babel/highlight": "^7.16.7" } }, "@babel/compat-data": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.13.15.tgz", - "integrity": "sha512-ltnibHKR1VnrU4ymHyQ/CXtNXI6yZC0oJThyW78Hft8XndANwi+9H+UIklBDraIjFEJzw8wmcM427oDd9KS5wA==" + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.16.8.tgz", + "integrity": "sha512-m7OkX0IdKLKPpBlJtF561YJal5y/jyI5fNfWbPxh2D/nbzzGI4qRyrD8xO2jB24u7l+5I2a43scCG2IrfjC50Q==" }, "@babel/core": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.13.15.tgz", - "integrity": "sha512-6GXmNYeNjS2Uz+uls5jalOemgIhnTMeaXo+yBUA72kC2uX/8VW6XyhVIo2L8/q0goKQA3EVKx0KOQpVKSeWadQ==", - "requires": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-module-transforms": "^7.13.14", - "@babel/helpers": "^7.13.10", - "@babel/parser": "^7.13.15", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.15", - "@babel/types": "^7.13.14", + "version": "7.16.12", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.16.12.tgz", + "integrity": "sha512-dK5PtG1uiN2ikk++5OzSYsitZKny4wOCD0nrO4TqnW4BVBTQ2NGS3NgilvT/TEyxTST7LNyWV/T4tXDoD3fOgg==", + "requires": { + "@babel/code-frame": "^7.16.7", + "@babel/generator": "^7.16.8", + "@babel/helper-compilation-targets": "^7.16.7", + 
"@babel/helper-module-transforms": "^7.16.7", + "@babel/helpers": "^7.16.7", + "@babel/parser": "^7.16.12", + "@babel/template": "^7.16.7", + "@babel/traverse": "^7.16.10", + "@babel/types": "^7.16.8", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -12688,9 +155,9 @@ }, "dependencies": { "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", "requires": { "ms": "2.1.2" } @@ -12716,11 +183,11 @@ } }, "@babel/generator": { - "version": "7.13.9", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.13.9.tgz", - "integrity": "sha512-mHOOmY0Axl/JCTkxTU6Lf5sWOg/v8nUa+Xkt4zMTftX0wqmb6Sh7J8gvcehBw7q0AhrhAR+FDacKjCZ2X8K+Sw==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.16.8.tgz", + "integrity": "sha512-1ojZwE9+lOXzcWdWmO6TbUzDfqLD39CmEhN8+2cX9XkDo5yW1OpgfejfliysR2AWLpMamTiOiAp/mtroaymhpw==", "requires": { - "@babel/types": "^7.13.0", + "@babel/types": "^7.16.8", "jsesc": "^2.5.1", "source-map": "^0.5.0" }, @@ -12733,58 +200,60 @@ } }, "@babel/helper-annotate-as-pure": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz", - "integrity": "sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.16.7.tgz", + "integrity": "sha512-s6t2w/IPQVTAET1HitoowRGXooX8mCgtuP5195wD/QJPV6wYjpujCGF7JuMODVX2ZAJOf1GT6DT9MHEZvLOFSw==", "requires": { - "@babel/types": "^7.12.13" + 
"@babel/types": "^7.16.7" } }, "@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.12.13.tgz", - "integrity": "sha512-CZOv9tGphhDRlVjVkAgm8Nhklm9RzSmWpX2my+t7Ua/KT616pEzXsQCjinzvkRvHWJ9itO4f296efroX23XCMA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz", + "integrity": "sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA==", "requires": { - "@babel/helper-explode-assignable-expression": "^7.12.13", - "@babel/types": "^7.12.13" + "@babel/helper-explode-assignable-expression": "^7.16.7", + "@babel/types": "^7.16.7" } }, "@babel/helper-compilation-targets": { - "version": "7.13.13", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.13.tgz", - "integrity": "sha512-q1kcdHNZehBwD9jYPh3WyXcsFERi39X4I59I3NadciWtNDyZ6x+GboOxncFK0kXlKIv6BJm5acncehXWUjWQMQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.16.7.tgz", + "integrity": "sha512-mGojBwIWcwGD6rfqgRXVlVYmPAv7eOpIemUG3dGnDdCY4Pae70ROij3XmfrH6Fa1h1aiDylpglbZyktfzyo/hA==", "requires": { - "@babel/compat-data": "^7.13.12", - "@babel/helper-validator-option": "^7.12.17", - "browserslist": "^4.14.5", + "@babel/compat-data": "^7.16.4", + "@babel/helper-validator-option": "^7.16.7", + "browserslist": "^4.17.5", "semver": "^6.3.0" } }, "@babel/helper-create-class-features-plugin": { - "version": "7.13.11", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz", - "integrity": 
"sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw==", + "version": "7.16.10", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.16.10.tgz", + "integrity": "sha512-wDeej0pu3WN/ffTxMNCPW5UCiOav8IcLRxSIyp/9+IF2xJUM9h/OYjg0IJLHaL6F8oU8kqMz9nc1vryXhMsgXg==", "requires": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-member-expression-to-functions": "^7.13.0", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13" + "@babel/helper-annotate-as-pure": "^7.16.7", + "@babel/helper-environment-visitor": "^7.16.7", + "@babel/helper-function-name": "^7.16.7", + "@babel/helper-member-expression-to-functions": "^7.16.7", + "@babel/helper-optimise-call-expression": "^7.16.7", + "@babel/helper-replace-supers": "^7.16.7", + "@babel/helper-split-export-declaration": "^7.16.7" } }, "@babel/helper-create-regexp-features-plugin": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.17.tgz", - "integrity": "sha512-p2VGmBu9oefLZ2nQpgnEnG0ZlRPvL8gAGvPUMQwUdaE8k49rOMuZpOwdQoy5qJf6K8jL3bcAMhVUlHAjIgJHUg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.16.7.tgz", + "integrity": "sha512-fk5A6ymfp+O5+p2yCkXAu5Kyj6v0xh0RBeNcAkYUMDvvAAoxvSKXn+Jb37t/yWFiQVDFK1ELpUTD8/aLhCPu+g==", "requires": { - "@babel/helper-annotate-as-pure": "^7.12.13", + "@babel/helper-annotate-as-pure": "^7.16.7", "regexpu-core": "^4.7.1" } }, "@babel/helper-define-polyfill-provider": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz", - "integrity": 
"sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw==", + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.1.tgz", + "integrity": "sha512-J9hGMpJQmtWmj46B3kBHmL38UhJGhYX7eqkcq+2gsstyYt341HmPeWspihX43yVRA0mS+8GGk2Gckc7bY/HCmA==", "requires": { "@babel/helper-compilation-targets": "^7.13.0", "@babel/helper-module-imports": "^7.12.13", @@ -12797,9 +266,9 @@ }, "dependencies": { "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", "requires": { "ms": "2.1.2" } @@ -12811,316 +280,353 @@ } } }, + "@babel/helper-environment-visitor": { + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.16.7.tgz", + "integrity": "sha512-SLLb0AAn6PkUeAfKJCCOl9e1R53pQlGAfc4y4XuMRZfqeMYLE0dM1LMhqbGAlGQY0lfw5/ohoYWAe9V1yibRag==", + "requires": { + "@babel/types": "^7.16.7" + } + }, "@babel/helper-explode-assignable-expression": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz", - "integrity": "sha512-qS0peLTDP8kOisG1blKbaoBg/o9OSa1qoumMjTK5pM+KDTtpxpsiubnCGP34vK8BXGcb2M9eigwgvoJryrzwWA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz", + "integrity": "sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ==", "requires": { - "@babel/types": "^7.13.0" + 
"@babel/types": "^7.16.7" } }, "@babel/helper-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz", - "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.16.7.tgz", + "integrity": "sha512-QfDfEnIUyyBSR3HtrtGECuZ6DAyCkYFp7GHl75vFtTnn6pjKeK0T1DB5lLkFvBea8MdaiUABx3osbgLyInoejA==", "requires": { - "@babel/helper-get-function-arity": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/types": "^7.12.13" + "@babel/helper-get-function-arity": "^7.16.7", + "@babel/template": "^7.16.7", + "@babel/types": "^7.16.7" } }, "@babel/helper-get-function-arity": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz", - "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.16.7.tgz", + "integrity": "sha512-flc+RLSOBXzNzVhcLu6ujeHUrD6tANAOU5ojrRx/as+tbzf8+stUCj7+IfRRoAbEZqj/ahXEMsjhOhgeZsrnTw==", "requires": { - "@babel/types": "^7.12.13" + "@babel/types": "^7.16.7" } }, "@babel/helper-hoist-variables": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.0.tgz", - "integrity": "sha512-0kBzvXiIKfsCA0y6cFEIJf4OdzfpRuNk4+YTeHZpGGc666SATFKTz6sRncwFnQk7/ugJ4dSrCj6iJuvW4Qwr2g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz", + "integrity": "sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg==", "requires": { - "@babel/traverse": "^7.13.0", - 
"@babel/types": "^7.13.0" + "@babel/types": "^7.16.7" } }, "@babel/helper-member-expression-to-functions": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz", - "integrity": "sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.16.7.tgz", + "integrity": "sha512-VtJ/65tYiU/6AbMTDwyoXGPKHgTsfRarivm+YbB5uAzKUyuPjgZSgAFeG87FCigc7KNHu2Pegh1XIT3lXjvz3Q==", "requires": { - "@babel/types": "^7.13.12" + "@babel/types": "^7.16.7" } }, "@babel/helper-module-imports": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz", - "integrity": "sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.16.7.tgz", + "integrity": "sha512-LVtS6TqjJHFc+nYeITRo6VLXve70xmq7wPhWTqDJusJEgGmkAACWwMiTNrvfoQo6hEhFwAIixNkvB0jPXDL8Wg==", "requires": { - "@babel/types": "^7.13.12" + "@babel/types": "^7.16.7" } }, "@babel/helper-module-transforms": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.13.14.tgz", - "integrity": "sha512-QuU/OJ0iAOSIatyVZmfqB0lbkVP0kDRiKj34xy+QNsnVZi/PA6BoSoreeqnxxa9EHFAIL0R9XOaAR/G9WlIy5g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.16.7.tgz", + "integrity": "sha512-gaqtLDxJEFCeQbYp9aLAefjhkKdjKcdh6DB7jniIGU3Pz52WAmP268zK0VgPz9hUNkMSYeH976K2/Y6yPadpng==", "requires": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-replace-supers": "^7.13.12", - 
"@babel/helper-simple-access": "^7.13.12", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/helper-validator-identifier": "^7.12.11", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.13", - "@babel/types": "^7.13.14" + "@babel/helper-environment-visitor": "^7.16.7", + "@babel/helper-module-imports": "^7.16.7", + "@babel/helper-simple-access": "^7.16.7", + "@babel/helper-split-export-declaration": "^7.16.7", + "@babel/helper-validator-identifier": "^7.16.7", + "@babel/template": "^7.16.7", + "@babel/traverse": "^7.16.7", + "@babel/types": "^7.16.7" } }, "@babel/helper-optimise-call-expression": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz", - "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.16.7.tgz", + "integrity": "sha512-EtgBhg7rd/JcnpZFXpBy0ze1YRfdm7BnBX4uKMBd3ixa3RGAE002JZB66FJyNH7g0F38U05pXmA5P8cBh7z+1w==", "requires": { - "@babel/types": "^7.12.13" + "@babel/types": "^7.16.7" } }, "@babel/helper-plugin-utils": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz", - "integrity": "sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ==" + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.16.7.tgz", + "integrity": "sha512-Qg3Nk7ZxpgMrsox6HreY1ZNKdBq7K72tDSliA6dCl5f007jR4ne8iD5UzuNnCJH2xBf2BEEVGr+/OL6Gdp7RxA==" }, "@babel/helper-remap-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz", - "integrity": 
"sha512-pUQpFBE9JvC9lrQbpX0TmeNIy5s7GnZjna2lhhcHC7DzgBs6fWn722Y5cfwgrtrqc7NAJwMvOa0mKhq6XaE4jg==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz", + "integrity": "sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw==", "requires": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-wrap-function": "^7.13.0", - "@babel/types": "^7.13.0" + "@babel/helper-annotate-as-pure": "^7.16.7", + "@babel/helper-wrap-function": "^7.16.8", + "@babel/types": "^7.16.8" } }, "@babel/helper-replace-supers": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.13.12.tgz", - "integrity": "sha512-Gz1eiX+4yDO8mT+heB94aLVNCL+rbuT2xy4YfyNqu8F+OI6vMvJK891qGBTqL9Uc8wxEvRW92Id6G7sDen3fFw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.16.7.tgz", + "integrity": "sha512-y9vsWilTNaVnVh6xiJfABzsNpgDPKev9HnAgz6Gb1p6UUwf9NepdlsV7VXGCftJM+jqD5f7JIEubcpLjZj5dBw==", "requires": { - "@babel/helper-member-expression-to-functions": "^7.13.12", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.12" + "@babel/helper-environment-visitor": "^7.16.7", + "@babel/helper-member-expression-to-functions": "^7.16.7", + "@babel/helper-optimise-call-expression": "^7.16.7", + "@babel/traverse": "^7.16.7", + "@babel/types": "^7.16.7" } }, "@babel/helper-simple-access": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz", - "integrity": "sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.16.7.tgz", + "integrity": 
"sha512-ZIzHVyoeLMvXMN/vok/a4LWRy8G2v205mNP0XOuf9XRLyX5/u9CnVulUtDgUTama3lT+bf/UqucuZjqiGuTS1g==", "requires": { - "@babel/types": "^7.13.12" + "@babel/types": "^7.16.7" } }, "@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.12.1", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz", - "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==", + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz", + "integrity": "sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw==", "requires": { - "@babel/types": "^7.12.1" + "@babel/types": "^7.16.0" } }, "@babel/helper-split-export-declaration": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz", - "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.16.7.tgz", + "integrity": "sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw==", "requires": { - "@babel/types": "^7.12.13" + "@babel/types": "^7.16.7" } }, "@babel/helper-validator-identifier": { - "version": "7.12.11", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz", - "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw==" + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.16.7.tgz", + "integrity": 
"sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw==" }, "@babel/helper-validator-option": { - "version": "7.12.17", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz", - "integrity": "sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw==" + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.16.7.tgz", + "integrity": "sha512-TRtenOuRUVo9oIQGPC5G9DgK4743cdxvtOw0weQNpZXaS16SCBi5MNjZF8vba3ETURjZpTbVn7Vvcf2eAwFozQ==" }, "@babel/helper-wrap-function": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz", - "integrity": "sha512-1UX9F7K3BS42fI6qd2A4BjKzgGjToscyZTdp1DjknHLCIvpgne6918io+aL5LXFcER/8QWiwpoY902pVEqgTXA==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz", + "integrity": "sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw==", "requires": { - "@babel/helper-function-name": "^7.12.13", - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - "@babel/types": "^7.13.0" + "@babel/helper-function-name": "^7.16.7", + "@babel/template": "^7.16.7", + "@babel/traverse": "^7.16.8", + "@babel/types": "^7.16.8" } }, "@babel/helpers": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.13.10.tgz", - "integrity": "sha512-4VO883+MWPDUVRF3PhiLBUFHoX/bsLTGFpFK/HqvvfBZz2D57u9XzPVNFVBTc0PW/CWR9BXTOKt8NF4DInUHcQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.16.7.tgz", + "integrity": "sha512-9ZDoqtfY7AuEOt3cxchfii6C7GDyyMBffktR5B2jvWv8u2+efwvpnVKXMWzNehqy68tKgAfSwfdw/lWpthS2bw==", "requires": { - "@babel/template": "^7.12.13", - "@babel/traverse": "^7.13.0", - 
"@babel/types": "^7.13.0" + "@babel/template": "^7.16.7", + "@babel/traverse": "^7.16.7", + "@babel/types": "^7.16.7" } }, "@babel/highlight": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz", - "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==", + "version": "7.16.10", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz", + "integrity": "sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==", "requires": { - "@babel/helper-validator-identifier": "^7.12.11", + "@babel/helper-validator-identifier": "^7.16.7", "chalk": "^2.0.0", "js-tokens": "^4.0.0" } }, "@babel/parser": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.13.15.tgz", - "integrity": "sha512-b9COtcAlVEQljy/9fbcMHpG+UIW9ReF+gpaxDHTlZd0c6/UU9ng8zdySAW9sRTzpvcdCHn6bUcbuYUgGzLAWVQ==" + "version": "7.16.12", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.16.12.tgz", + "integrity": "sha512-VfaV15po8RiZssrkPweyvbGVSe4x2y+aciFCgn0n0/SJMR22cwofRV1mtnJQYcSB1wUTaA/X1LnA3es66MCO5A==" + }, + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.16.7.tgz", + "integrity": "sha512-anv/DObl7waiGEnC24O9zqL0pSuI9hljihqiDuFHC8d7/bjr/4RLGPWuc8rYOff/QPzbEPSkzG8wGG9aDuhHRg==", + "requires": { + "@babel/helper-plugin-utils": "^7.16.7" + } }, "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz", - "integrity": 
"sha512-d0u3zWKcoZf379fOeJdr1a5WPDny4aOFZ6hlfKivgK0LY7ZxNfoaHL2fWwdGtHyVvra38FC+HVYkO+byfSA8AQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.16.7.tgz", + "integrity": "sha512-di8vUHRdf+4aJ7ltXhaDbPoszdkh59AQtJM5soLsuHpQJdFQZOA4uGj0V2u/CZ8bJ/u8ULDL5yq6FO/bCXnKHw==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", - "@babel/plugin-proposal-optional-chaining": "^7.13.12" + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", + "@babel/plugin-proposal-optional-chaining": "^7.16.7" } }, "@babel/plugin-proposal-async-generator-functions": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz", - "integrity": "sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.16.8.tgz", + "integrity": "sha512-71YHIvMuiuqWJQkebWJtdhQTfd4Q4mF76q2IX37uZPkG9+olBxsX+rH1vkhFto4UeJZ9dPY2s+mDvhDm1u2BGQ==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-remap-async-to-generator": "^7.16.8", "@babel/plugin-syntax-async-generators": "^7.8.4" } }, "@babel/plugin-proposal-class-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz", - "integrity": "sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg==", + "version": "7.16.7", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.16.7.tgz", + "integrity": "sha512-IobU0Xme31ewjYOShSIqd/ZGM/r/cuOz2z0MDbNrhF5FW+ZVgi0f2lyeoj9KFPDOAqsYxmLWZte1WOwlvY9aww==", + "requires": { + "@babel/helper-create-class-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" + } + }, + "@babel/plugin-proposal-class-static-block": { + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.16.7.tgz", + "integrity": "sha512-dgqJJrcZoG/4CkMopzhPJjGxsIe9A8RlkQLnL/Vhhx8AA9ZuaRwGSlscSh42hazc7WSrya/IK7mTeoF0DP9tEw==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-create-class-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/plugin-syntax-class-static-block": "^7.14.5" } }, "@babel/plugin-proposal-decorators": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.13.15.tgz", - "integrity": "sha512-ibAMAqUm97yzi+LPgdr5Nqb9CMkeieGHvwPg1ywSGjZrZHQEGqE01HmOio8kxRpA/+VtOHouIVy2FMpBbtltjA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.16.7.tgz", + "integrity": "sha512-DoEpnuXK14XV9btI1k8tzNGCutMclpj4yru8aXKoHlVmbO1s+2A+g2+h4JhcjrxkFJqzbymnLG6j/niOf3iFXQ==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.13.11", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/plugin-syntax-decorators": "^7.12.13" + "@babel/helper-create-class-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/plugin-syntax-decorators": "^7.16.7" } }, "@babel/plugin-proposal-dynamic-import": { - "version": "7.13.8", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.13.8.tgz", - "integrity": "sha512-ONWKj0H6+wIRCkZi9zSbZtE/r73uOhMVHh256ys0UzfM7I3d4n+spZNWjOnJv2gzopumP2Wxi186vI8N0Y2JyQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz", + "integrity": "sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-dynamic-import": "^7.8.3" } }, "@babel/plugin-proposal-export-namespace-from": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.13.tgz", - "integrity": "sha512-INAgtFo4OnLN3Y/j0VwAgw3HDXcDtX+C/erMvWzuV9v71r7urb6iyMXu7eM9IgLr1ElLlOkaHjJ0SbCmdOQ3Iw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.16.7.tgz", + "integrity": "sha512-ZxdtqDXLRGBL64ocZcs7ovt71L3jhC1RGSyR996svrCi3PYqHNkb3SwPJCs8RIzD86s+WPpt2S73+EHCGO+NUA==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-export-namespace-from": "^7.8.3" } }, "@babel/plugin-proposal-json-strings": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.13.8.tgz", - "integrity": "sha512-w4zOPKUFPX1mgvTmL/fcEqy34hrQ1CRcGxdphBc6snDnnqJ47EZDIyop6IwXzAC8G916hsIuXB2ZMBCExC5k7Q==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.16.7.tgz", + "integrity": "sha512-lNZ3EEggsGY78JavgbHsK9u5P3pQaW7k4axlgFLYkMd7UBsiNahCITShLjNQschPyjtO6dADrL24757IdhBrsQ==", "requires": { - 
"@babel/helper-plugin-utils": "^7.13.0", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-json-strings": "^7.8.3" } }, "@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.13.8.tgz", - "integrity": "sha512-aul6znYB4N4HGweImqKn59Su9RS8lbUIqxtXTOcAGtNIDczoEFv+l1EhmX8rUBp3G1jMjKJm8m0jXVp63ZpS4A==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.16.7.tgz", + "integrity": "sha512-K3XzyZJGQCr00+EtYtrDjmwX7o7PLK6U9bi1nCwkQioRFVUv6dJoxbQjtWVtP+bCPy82bONBKG8NPyQ4+i6yjg==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" } }, "@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.13.8.tgz", - "integrity": "sha512-iePlDPBn//UhxExyS9KyeYU7RM9WScAG+D3Hhno0PLJebAEpDZMocbDe64eqynhNAnwz/vZoL/q/QB2T1OH39A==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.16.7.tgz", + "integrity": "sha512-aUOrYU3EVtjf62jQrCj63pYZ7k6vns2h/DQvHPWGmsJRYzWXZ6/AsfgpiRy6XiuIDADhJzP2Q9MwSMKauBQ+UQ==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" } }, "@babel/plugin-proposal-numeric-separator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.13.tgz", - "integrity": 
"sha512-O1jFia9R8BUCl3ZGB7eitaAPu62TXJRHn7rh+ojNERCFyqRwJMTmhz+tJ+k0CwI6CLjX/ee4qW74FSqlq9I35w==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz", + "integrity": "sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-numeric-separator": "^7.10.4" } }, "@babel/plugin-proposal-object-rest-spread": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz", - "integrity": "sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.16.7.tgz", + "integrity": "sha512-3O0Y4+dw94HA86qSg9IHfyPktgR7q3gpNVAeiKQd+8jBKFaU5NQS1Yatgo4wY+UFNuLjvxcSmzcsHqrhgTyBUA==", "requires": { - "@babel/compat-data": "^7.13.8", - "@babel/helper-compilation-targets": "^7.13.8", - "@babel/helper-plugin-utils": "^7.13.0", + "@babel/compat-data": "^7.16.4", + "@babel/helper-compilation-targets": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.13.0" + "@babel/plugin-transform-parameters": "^7.16.7" } }, "@babel/plugin-proposal-optional-catch-binding": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.13.8.tgz", - "integrity": "sha512-0wS/4DUF1CuTmGo+NiaHfHcVSeSLj5S3e6RivPTg/2k3wOv3jO35tZ6/ZWsQhQMvdgI7CwphjQa/ccarLymHVA==", + "version": "7.16.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz", + "integrity": "sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", + "@babel/helper-plugin-utils": "^7.16.7", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" } }, "@babel/plugin-proposal-optional-chaining": { - "version": "7.13.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.13.12.tgz", - "integrity": "sha512-fcEdKOkIB7Tf4IxrgEVeFC4zeJSTr78no9wTdBuZZbqF64kzllU0ybo2zrzm7gUQfxGhBgq4E39oRs8Zx/RMYQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.16.7.tgz", + "integrity": "sha512-eC3xy+ZrUcBtP7x+sq62Q/HYd674pPTb/77XZMb5wbDPGWIdUbSr4Agr052+zaUPSb+gGRnjxXfKFvx5iMJ+DA==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", "@babel/plugin-syntax-optional-chaining": "^7.8.3" } }, "@babel/plugin-proposal-private-methods": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.13.0.tgz", - "integrity": "sha512-MXyyKQd9inhx1kDYPkFRVOBXQ20ES8Pto3T7UZ92xj2mY0EVD8oAVzeyYuVfy/mxAdTSIayOvg+aVzcHV2bn6Q==", + "version": "7.16.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.16.11.tgz", + "integrity": "sha512-F/2uAkPlXDr8+BHpZvo19w3hLFKge+k75XUprE6jaqKxjGkSYcK+4c+bup5PdW/7W/Rpjwql7FTVEDW+fRAQsw==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" + 
"@babel/helper-create-class-features-plugin": "^7.16.10", + "@babel/helper-plugin-utils": "^7.16.7" + } + }, + "@babel/plugin-proposal-private-property-in-object": { + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.16.7.tgz", + "integrity": "sha512-rMQkjcOFbm+ufe3bTZLyOfsOUOxyvLXZJCTARhJr+8UMSoZmqTe1K1BgkFcrW37rAchWg57yI69ORxiWvUINuQ==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.16.7", + "@babel/helper-create-class-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" } }, "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz", - "integrity": "sha512-XyJmZidNfofEkqFV5VC/bLabGmO5QzenPO/YOfGuEbgU+2sSwMmio3YLb4WtBgcmmdwZHyVyv8on77IUjQ5Gvg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.16.7.tgz", + "integrity": "sha512-QRK0YI/40VLhNVGIjRNAAQkEHws0cswSdFFjpFyt943YmJIU1da9uW63Iu6NFV6CxTZW5eTDCrwZUstBWgp/Rg==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-create-regexp-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-syntax-async-generators": { @@ -13136,15 +642,23 @@ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.12.13" + } + }, + "@babel/plugin-syntax-class-static-block": { + "version": 
"7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" } }, "@babel/plugin-syntax-decorators": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz", - "integrity": "sha512-Rw6aIXGuqDLr6/LoBBYE57nKOzQpz/aDkKlMqEwH+Vp0MXbG6H/TfRjaY343LKxzAKAMXIHsQ8JzaZKuDZ9MwA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.16.7.tgz", + "integrity": "sha512-vQ+PxL+srA7g6Rx6I1e15m55gftknl2X8GCUW1JTlkTaXZLJOS0UcaY0eK9jYT7IYf4awn6qwyghVHLDz1WyMw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-syntax-dynamic-import": { @@ -13172,11 +686,11 @@ } }, "@babel/plugin-syntax-jsx": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz", - "integrity": "sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.16.7.tgz", + "integrity": "sha512-Esxmk7YjA8QysKeT3VhTXvF6y77f/a91SIs4pWb4H2eWGQkCKFgQaG6hdoEVZtGsrAcb2K5BW66XsOErD4WU3Q==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-syntax-logical-assignment-operators": { @@ -13227,332 +741,346 @@ "@babel/helper-plugin-utils": "^7.8.0" } }, + "@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + 
"integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" + } + }, "@babel/plugin-syntax-top-level-await": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz", - "integrity": "sha512-A81F9pDwyS7yM//KwbCSDqy3Uj4NMIurtplxphWxoYtNPov7cJsDkAFNNyVlIZ3jwGycVsurZ+LtOA8gZ376iQ==", + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.14.5" } }, "@babel/plugin-transform-arrow-functions": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz", - "integrity": "sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.16.7.tgz", + "integrity": "sha512-9ffkFFMbvzTvv+7dTp/66xvZAWASuPD5Tl9LK3Z9vhOmANo6j94rik+5YMBt4CwHVMWLWpMsriIc2zsa3WW3xQ==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-async-to-generator": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.13.0.tgz", - "integrity": "sha512-3j6E004Dx0K3eGmhxVJxwwI89CTJrce7lg3UrtFuDAVQ/2+SJ/h/aSFOeE6/n0WB1GsOffsJp6MnPQNQ8nmwhg==", + "version": "7.16.8", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.16.8.tgz", + "integrity": "sha512-MtmUmTJQHCnyJVrScNzNlofQJ3dLFuobYn3mwOTKHnSCMtbNsqvF71GQmJfFjdrXSsAA7iysFmYWw4bXZ20hOg==", "requires": { - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-remap-async-to-generator": "^7.13.0" + "@babel/helper-module-imports": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-remap-async-to-generator": "^7.16.8" } }, "@babel/plugin-transform-block-scoped-functions": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz", - "integrity": "sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz", + "integrity": "sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-block-scoping": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.13.tgz", - "integrity": "sha512-Pxwe0iqWJX4fOOM2kEZeUuAxHMWb9nK+9oh5d11bsLoB0xMg+mkDpt0eYuDZB7ETrY9bbcVlKUGTOGWy7BHsMQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.16.7.tgz", + "integrity": "sha512-ObZev2nxVAYA4bhyusELdo9hb3H+A56bxH3FZMbEImZFiEDYVHXQSJ1hQKFlDnlt8G9bBrCZ5ZpURZUrV4G5qQ==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-classes": { - "version": "7.13.0", - 
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz", - "integrity": "sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.12.13", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-optimise-call-expression": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-replace-supers": "^7.13.0", - "@babel/helper-split-export-declaration": "^7.12.13", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.16.7.tgz", + "integrity": "sha512-WY7og38SFAGYRe64BrjKf8OrE6ulEHtr5jEYaZMwox9KebgqPi67Zqz8K53EKk1fFEJgm96r32rkKZ3qA2nCWQ==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.16.7", + "@babel/helper-environment-visitor": "^7.16.7", + "@babel/helper-function-name": "^7.16.7", + "@babel/helper-optimise-call-expression": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-replace-supers": "^7.16.7", + "@babel/helper-split-export-declaration": "^7.16.7", "globals": "^11.1.0" } }, "@babel/plugin-transform-computed-properties": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz", - "integrity": "sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.16.7.tgz", + "integrity": "sha512-gN72G9bcmenVILj//sv1zLNaPyYcOzUho2lIJBMh/iakJ9ygCo/hEF9cpGb61SCMEDxbbyBoVQxrt+bWKu5KGw==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-destructuring": { - "version": "7.13.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.0.tgz", - "integrity": "sha512-zym5em7tePoNT9s964c0/KU3JPPnuq7VhIxPRefJ4/s82cD+q1mgKfuGRDMCPL0HTyKz4dISuQlCusfgCJ86HA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.16.7.tgz", + "integrity": "sha512-VqAwhTHBnu5xBVDCvrvqJbtLUa++qZaWC0Fgr2mqokBlulZARGyIvZDoqbPlPaKImQ9dKAcCzbv+ul//uqu70A==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-dotall-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.13.tgz", - "integrity": "sha512-foDrozE65ZFdUC2OfgeOCrEPTxdB3yjqxpXh8CH+ipd9CHd4s/iq81kcUpyH8ACGNEPdFqbtzfgzbT/ZGlbDeQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz", + "integrity": "sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-create-regexp-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-duplicate-keys": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.13.tgz", - "integrity": "sha512-NfADJiiHdhLBW3pulJlJI2NB0t4cci4WTZ8FtdIuNc2+8pslXdPtRRAEWqUY+m9kNOk2eRYbTAOipAxlrOcwwQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.16.7.tgz", + "integrity": "sha512-03DvpbRfvWIXyK0/6QiR1KMTWeT6OcQ7tbhjrXyFS02kjuX/mu5Bvnh5SDSWHxyawit2g5aWhKwI86EE7GUnTw==", "requires": { - 
"@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-exponentiation-operator": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.13.tgz", - "integrity": "sha512-fbUelkM1apvqez/yYx1/oICVnGo2KM5s63mhGylrmXUxK/IAXSIf87QIxVfZldWf4QsOafY6vV3bX8aMHSvNrA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz", + "integrity": "sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA==", "requires": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-for-of": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz", - "integrity": "sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.16.7.tgz", + "integrity": "sha512-/QZm9W92Ptpw7sjI9Nx1mbcsWz33+l8kuMIQnDwgQBG5s3fAfQvkRjQ7NqXhtNcKOnPkdICmUHyCaWW06HCsqg==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-function-name": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz", - "integrity": "sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ==", + "version": "7.16.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz", + "integrity": "sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA==", "requires": { - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-compilation-targets": "^7.16.7", + "@babel/helper-function-name": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz", - "integrity": "sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.16.7.tgz", + "integrity": "sha512-6tH8RTpTWI0s2sV6uq3e/C9wPo4PTqqZps4uF0kzQ9/xPLFQtipynvmT1g/dOfEJ+0EQsHhkQ/zyRId8J2b8zQ==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-member-expression-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz", - "integrity": "sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz", + "integrity": "sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-modules-amd": { - "version": "7.13.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz", - "integrity": "sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.16.7.tgz", + "integrity": "sha512-KaaEtgBL7FKYwjJ/teH63oAmE3lP34N3kshz8mm4VMAw7U3PxjVwwUmxEFksbgsNUaO3wId9R2AVQYSEGRa2+g==", "requires": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", + "@babel/helper-module-transforms": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-commonjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz", - "integrity": "sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.16.8.tgz", + "integrity": "sha512-oflKPvsLT2+uKQopesJt3ApiaIS2HW+hzHFcwRNtyDGieAeC/dIHZX8buJQ2J2X1rxGPy4eRcUijm3qcSPjYcA==", "requires": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-simple-access": "^7.12.13", + "@babel/helper-module-transforms": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-simple-access": "^7.16.7", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-systemjs": { - "version": "7.13.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.13.8.tgz", - "integrity": "sha512-hwqctPYjhM6cWvVIlOIe27jCIBgHCsdH2xCJVAYQm7V5yTMoilbVMi9f6wKg0rpQAOn6ZG4AOyvCqFF/hUh6+A==", - "requires": { - 
"@babel/helper-hoist-variables": "^7.13.0", - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-identifier": "^7.12.11", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.16.7.tgz", + "integrity": "sha512-DuK5E3k+QQmnOqBR9UkusByy5WZWGRxfzV529s9nPra1GE7olmxfqO2FHobEOYSPIjPBTr4p66YDcjQnt8cBmw==", + "requires": { + "@babel/helper-hoist-variables": "^7.16.7", + "@babel/helper-module-transforms": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-validator-identifier": "^7.16.7", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-umd": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz", - "integrity": "sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.16.7.tgz", + "integrity": "sha512-EMh7uolsC8O4xhudF2F6wedbSHm1HHZ0C6aJ7K67zcDNidMzVcxWdGr+htW9n21klm+bOn+Rx4CBsAntZd3rEQ==", "requires": { - "@babel/helper-module-transforms": "^7.13.0", - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-module-transforms": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.13.tgz", - "integrity": "sha512-Xsm8P2hr5hAxyYblrfACXpQKdQbx4m2df9/ZZSQ8MAhsadw06+jW7s9zsSw6he+mJZXRlVMyEnVktJo4zjk1WA==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.16.8.tgz", 
+ "integrity": "sha512-j3Jw+n5PvpmhRR+mrgIh04puSANCk/T/UA3m3P1MjJkhlK906+ApHhDIqBQDdOgL/r1UYpz4GNclTXxyZrYGSw==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13" + "@babel/helper-create-regexp-features-plugin": "^7.16.7" } }, "@babel/plugin-transform-new-target": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.13.tgz", - "integrity": "sha512-/KY2hbLxrG5GTQ9zzZSc3xWiOy379pIETEhbtzwZcw9rvuaVV4Fqy7BYGYOWZnaoXIQYbbJ0ziXLa/sKcGCYEQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.16.7.tgz", + "integrity": "sha512-xiLDzWNMfKoGOpc6t3U+etCE2yRnn3SM09BXqWPIZOBpL2gvVrBWUKnsJx0K/ADi5F5YC5f8APFfWrz25TdlGg==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-object-super": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz", - "integrity": "sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz", + "integrity": "sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13", - "@babel/helper-replace-supers": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-replace-supers": "^7.16.7" } }, "@babel/plugin-transform-parameters": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz", - "integrity": "sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw==", + "version": "7.16.7", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.16.7.tgz", + "integrity": "sha512-AT3MufQ7zZEhU2hwOA11axBnExW0Lszu4RL/tAlUJBuNoRak+wehQW8h6KcXOcgjY42fHtDxswuMhMjFEuv/aw==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-property-literals": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz", - "integrity": "sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz", + "integrity": "sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-regenerator": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz", - "integrity": "sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.16.7.tgz", + "integrity": "sha512-mF7jOgGYCkSJagJ6XCujSQg+6xC1M77/03K2oBmVJWoFGNUtnVJO4WHKJk3dnPC8HCcj4xBQP1Egm8DWh3Pb3Q==", "requires": { "regenerator-transform": "^0.14.2" } }, "@babel/plugin-transform-reserved-words": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.13.tgz", - "integrity": "sha512-xhUPzDXxZN1QfiOy/I5tyye+TRz6lA7z6xaT4CLOjPRMVg1ldRf0LHw0TDBpYL4vG78556WuHdyO9oi5UmzZBg==", + "version": "7.16.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.16.7.tgz", + "integrity": "sha512-KQzzDnZ9hWQBjwi5lpY5v9shmm6IVG0U9pB18zvMu2i4H90xpT4gmqwPYsn8rObiadYe2M0gmgsiOIF5A/2rtg==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-runtime": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.15.tgz", - "integrity": "sha512-d+ezl76gx6Jal08XngJUkXM4lFXK/5Ikl9Mh4HKDxSfGJXmZ9xG64XT2oivBzfxb/eQ62VfvoMkaCZUKJMVrBA==", - "requires": { - "@babel/helper-module-imports": "^7.13.12", - "@babel/helper-plugin-utils": "^7.13.0", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", + "version": "7.16.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.16.10.tgz", + "integrity": "sha512-9nwTiqETv2G7xI4RvXHNfpGdr8pAA+Q/YtN3yLK7OoK7n9OibVm/xymJ838a9A6E/IciOLPj82lZk0fW6O4O7w==", + "requires": { + "@babel/helper-module-imports": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "babel-plugin-polyfill-corejs2": "^0.3.0", + "babel-plugin-polyfill-corejs3": "^0.5.0", + "babel-plugin-polyfill-regenerator": "^0.3.0", "semver": "^6.3.0" } }, "@babel/plugin-transform-shorthand-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz", - "integrity": "sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz", + "integrity": "sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg==", 
"requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-spread": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz", - "integrity": "sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.16.7.tgz", + "integrity": "sha512-+pjJpgAngb53L0iaA5gU/1MLXJIfXcYepLgXB3esVRf4fqmj8f2cxM3/FKaHsZms08hFQJkFccEWuIpm429TXg==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1" + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0" } }, "@babel/plugin-transform-sticky-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.13.tgz", - "integrity": "sha512-Jc3JSaaWT8+fr7GRvQP02fKDsYk4K/lYwWq38r/UGfaxo89ajud321NH28KRQ7xy1Ybc0VUE5Pz8psjNNDUglg==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz", + "integrity": "sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-template-literals": { - "version": "7.13.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz", - "integrity": "sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw==", + "version": "7.16.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.16.7.tgz", + "integrity": "sha512-VwbkDDUeenlIjmfNeDX/V0aWrQH2QiVyJtwymVQSzItFDTpxfyJh3EVaQiS0rIN/CqbLGr0VcGmuwyTdZtdIsA==", "requires": { - "@babel/helper-plugin-utils": "^7.13.0" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-typeof-symbol": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.13.tgz", - "integrity": "sha512-eKv/LmUJpMnu4npgfvs3LiHhJua5fo/CysENxa45YCQXZwKnGCQKAg87bvoqSW1fFT+HA32l03Qxsm8ouTY3ZQ==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.16.7.tgz", + "integrity": "sha512-p2rOixCKRJzpg9JB4gjnG4gjWkWa89ZoYUnl9snJ1cWIcTH/hvxZqfO+WjG6T8DRBpctEol5jw1O5rA8gkCokQ==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-unicode-escapes": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.13.tgz", - "integrity": "sha512-0bHEkdwJ/sN/ikBHfSmOXPypN/beiGqjo+o4/5K+vxEFNPRPdImhviPakMKG4x96l85emoa0Z6cDflsdBusZbw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz", + "integrity": "sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q==", "requires": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/plugin-transform-unicode-regex": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.13.tgz", - "integrity": 
"sha512-mDRzSNY7/zopwisPZ5kM9XKCfhchqIYwAKRERtEnhYscZB79VRekuRSoYbN0+KVe3y8+q1h6A4svXtP7N+UoCA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz", + "integrity": "sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.12.13", - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-create-regexp-features-plugin": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7" } }, "@babel/preset-env": { - "version": "7.13.15", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.13.15.tgz", - "integrity": "sha512-D4JAPMXcxk69PKe81jRJ21/fP/uYdcTZ3hJDF5QX2HSI9bBxxYw/dumdR6dGumhjxlprHPE4XWoPaqzZUVy2MA==", - "requires": { - "@babel/compat-data": "^7.13.15", - "@babel/helper-compilation-targets": "^7.13.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/helper-validator-option": "^7.12.17", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-async-generator-functions": "^7.13.15", - "@babel/plugin-proposal-class-properties": "^7.13.0", - "@babel/plugin-proposal-dynamic-import": "^7.13.8", - "@babel/plugin-proposal-export-namespace-from": "^7.12.13", - "@babel/plugin-proposal-json-strings": "^7.13.8", - "@babel/plugin-proposal-logical-assignment-operators": "^7.13.8", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8", - "@babel/plugin-proposal-numeric-separator": "^7.12.13", - "@babel/plugin-proposal-object-rest-spread": "^7.13.8", - "@babel/plugin-proposal-optional-catch-binding": "^7.13.8", - "@babel/plugin-proposal-optional-chaining": "^7.13.12", - "@babel/plugin-proposal-private-methods": "^7.13.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.12.13", + "version": "7.16.11", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.16.11.tgz", 
+ "integrity": "sha512-qcmWG8R7ZW6WBRPZK//y+E3Cli151B20W1Rv7ln27vuPaXU/8TKms6jFdiJtF7UDTxcrb7mZd88tAeK9LjdT8g==", + "requires": { + "@babel/compat-data": "^7.16.8", + "@babel/helper-compilation-targets": "^7.16.7", + "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-validator-option": "^7.16.7", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.16.7", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.16.7", + "@babel/plugin-proposal-async-generator-functions": "^7.16.8", + "@babel/plugin-proposal-class-properties": "^7.16.7", + "@babel/plugin-proposal-class-static-block": "^7.16.7", + "@babel/plugin-proposal-dynamic-import": "^7.16.7", + "@babel/plugin-proposal-export-namespace-from": "^7.16.7", + "@babel/plugin-proposal-json-strings": "^7.16.7", + "@babel/plugin-proposal-logical-assignment-operators": "^7.16.7", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7", + "@babel/plugin-proposal-numeric-separator": "^7.16.7", + "@babel/plugin-proposal-object-rest-spread": "^7.16.7", + "@babel/plugin-proposal-optional-catch-binding": "^7.16.7", + "@babel/plugin-proposal-optional-chaining": "^7.16.7", + "@babel/plugin-proposal-private-methods": "^7.16.11", + "@babel/plugin-proposal-private-property-in-object": "^7.16.7", + "@babel/plugin-proposal-unicode-property-regex": "^7.16.7", "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", "@babel/plugin-syntax-dynamic-import": "^7.8.3", "@babel/plugin-syntax-export-namespace-from": "^7.8.3", "@babel/plugin-syntax-json-strings": "^7.8.3", @@ -13562,52 +1090,53 @@ "@babel/plugin-syntax-object-rest-spread": "^7.8.3", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-top-level-await": "^7.12.13", - "@babel/plugin-transform-arrow-functions": "^7.13.0", - 
"@babel/plugin-transform-async-to-generator": "^7.13.0", - "@babel/plugin-transform-block-scoped-functions": "^7.12.13", - "@babel/plugin-transform-block-scoping": "^7.12.13", - "@babel/plugin-transform-classes": "^7.13.0", - "@babel/plugin-transform-computed-properties": "^7.13.0", - "@babel/plugin-transform-destructuring": "^7.13.0", - "@babel/plugin-transform-dotall-regex": "^7.12.13", - "@babel/plugin-transform-duplicate-keys": "^7.12.13", - "@babel/plugin-transform-exponentiation-operator": "^7.12.13", - "@babel/plugin-transform-for-of": "^7.13.0", - "@babel/plugin-transform-function-name": "^7.12.13", - "@babel/plugin-transform-literals": "^7.12.13", - "@babel/plugin-transform-member-expression-literals": "^7.12.13", - "@babel/plugin-transform-modules-amd": "^7.13.0", - "@babel/plugin-transform-modules-commonjs": "^7.13.8", - "@babel/plugin-transform-modules-systemjs": "^7.13.8", - "@babel/plugin-transform-modules-umd": "^7.13.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.13", - "@babel/plugin-transform-new-target": "^7.12.13", - "@babel/plugin-transform-object-super": "^7.12.13", - "@babel/plugin-transform-parameters": "^7.13.0", - "@babel/plugin-transform-property-literals": "^7.12.13", - "@babel/plugin-transform-regenerator": "^7.13.15", - "@babel/plugin-transform-reserved-words": "^7.12.13", - "@babel/plugin-transform-shorthand-properties": "^7.12.13", - "@babel/plugin-transform-spread": "^7.13.0", - "@babel/plugin-transform-sticky-regex": "^7.12.13", - "@babel/plugin-transform-template-literals": "^7.13.0", - "@babel/plugin-transform-typeof-symbol": "^7.12.13", - "@babel/plugin-transform-unicode-escapes": "^7.12.13", - "@babel/plugin-transform-unicode-regex": "^7.12.13", - "@babel/preset-modules": "^0.1.4", - "@babel/types": "^7.13.14", - "babel-plugin-polyfill-corejs2": "^0.2.0", - "babel-plugin-polyfill-corejs3": "^0.2.0", - "babel-plugin-polyfill-regenerator": "^0.2.0", - "core-js-compat": "^3.9.0", + 
"@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-transform-arrow-functions": "^7.16.7", + "@babel/plugin-transform-async-to-generator": "^7.16.8", + "@babel/plugin-transform-block-scoped-functions": "^7.16.7", + "@babel/plugin-transform-block-scoping": "^7.16.7", + "@babel/plugin-transform-classes": "^7.16.7", + "@babel/plugin-transform-computed-properties": "^7.16.7", + "@babel/plugin-transform-destructuring": "^7.16.7", + "@babel/plugin-transform-dotall-regex": "^7.16.7", + "@babel/plugin-transform-duplicate-keys": "^7.16.7", + "@babel/plugin-transform-exponentiation-operator": "^7.16.7", + "@babel/plugin-transform-for-of": "^7.16.7", + "@babel/plugin-transform-function-name": "^7.16.7", + "@babel/plugin-transform-literals": "^7.16.7", + "@babel/plugin-transform-member-expression-literals": "^7.16.7", + "@babel/plugin-transform-modules-amd": "^7.16.7", + "@babel/plugin-transform-modules-commonjs": "^7.16.8", + "@babel/plugin-transform-modules-systemjs": "^7.16.7", + "@babel/plugin-transform-modules-umd": "^7.16.7", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.16.8", + "@babel/plugin-transform-new-target": "^7.16.7", + "@babel/plugin-transform-object-super": "^7.16.7", + "@babel/plugin-transform-parameters": "^7.16.7", + "@babel/plugin-transform-property-literals": "^7.16.7", + "@babel/plugin-transform-regenerator": "^7.16.7", + "@babel/plugin-transform-reserved-words": "^7.16.7", + "@babel/plugin-transform-shorthand-properties": "^7.16.7", + "@babel/plugin-transform-spread": "^7.16.7", + "@babel/plugin-transform-sticky-regex": "^7.16.7", + "@babel/plugin-transform-template-literals": "^7.16.7", + "@babel/plugin-transform-typeof-symbol": "^7.16.7", + "@babel/plugin-transform-unicode-escapes": "^7.16.7", + "@babel/plugin-transform-unicode-regex": "^7.16.7", + "@babel/preset-modules": "^0.1.5", + "@babel/types": "^7.16.8", + "babel-plugin-polyfill-corejs2": "^0.3.0", 
+ "babel-plugin-polyfill-corejs3": "^0.5.0", + "babel-plugin-polyfill-regenerator": "^0.3.0", + "core-js-compat": "^3.20.2", "semver": "^6.3.0" } }, "@babel/preset-modules": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz", - "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==", + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", + "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", "requires": { "@babel/helper-plugin-utils": "^7.0.0", "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", @@ -13617,42 +1146,44 @@ } }, "@babel/runtime": { - "version": "7.13.10", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.10.tgz", - "integrity": "sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.16.7.tgz", + "integrity": "sha512-9E9FJowqAsytyOY6LG+1KuueckRL+aQW+mKvXRXnuFGyRAyepJPmEo9vgMfXUA6O9u3IeEdv9MAkppFcaQwogQ==", "requires": { "regenerator-runtime": "^0.13.4" } }, "@babel/template": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz", - "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==", + "version": "7.16.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.16.7.tgz", + "integrity": "sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w==", "requires": { - "@babel/code-frame": "^7.12.13", - "@babel/parser": "^7.12.13", - "@babel/types": "^7.12.13" + "@babel/code-frame": "^7.16.7", + "@babel/parser": "^7.16.7", + "@babel/types": "^7.16.7" } }, "@babel/traverse": { - "version": "7.13.15", - 
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.13.15.tgz", - "integrity": "sha512-/mpZMNvj6bce59Qzl09fHEs8Bt8NnpEDQYleHUPZQ3wXUMvXi+HJPLars68oAbmp839fGoOkv2pSL2z9ajCIaQ==", - "requires": { - "@babel/code-frame": "^7.12.13", - "@babel/generator": "^7.13.9", - "@babel/helper-function-name": "^7.12.13", - "@babel/helper-split-export-declaration": "^7.12.13", - "@babel/parser": "^7.13.15", - "@babel/types": "^7.13.14", + "version": "7.16.10", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.16.10.tgz", + "integrity": "sha512-yzuaYXoRJBGMlBhsMJoUW7G1UmSb/eXr/JHYM/MsOJgavJibLwASijW7oXBdw3NQ6T0bW7Ty5P/VarOs9cHmqw==", + "requires": { + "@babel/code-frame": "^7.16.7", + "@babel/generator": "^7.16.8", + "@babel/helper-environment-visitor": "^7.16.7", + "@babel/helper-function-name": "^7.16.7", + "@babel/helper-hoist-variables": "^7.16.7", + "@babel/helper-split-export-declaration": "^7.16.7", + "@babel/parser": "^7.16.10", + "@babel/types": "^7.16.8", "debug": "^4.1.0", "globals": "^11.1.0" }, "dependencies": { "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", "requires": { "ms": "2.1.2" } @@ -13665,12 +1196,11 @@ } }, "@babel/types": { - "version": "7.13.14", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.13.14.tgz", - "integrity": "sha512-A2aa3QTkWoyqsZZFl56MLUsfmh7O0gN41IPvXAE/++8ojpbz12SszD7JEGYVdn4f9Kt4amIei07swF1h4AqmmQ==", + "version": "7.16.8", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.16.8.tgz", + "integrity": "sha512-smN2DQc5s4M7fntyjGtyIPbRJv6wW4rU/94fmYJ7PKQuZkC0qGMHXJbg6sNGt12JmVr4k5YaptI/XtiLJBnmIg==", "requires": { 
- "@babel/helper-validator-identifier": "^7.12.11", - "lodash": "^4.17.19", + "@babel/helper-validator-identifier": "^7.16.7", "to-fast-properties": "^2.0.0" } }, @@ -13701,6 +1231,11 @@ "follow-redirects": "1.5.10" } }, + "clipboard-copy": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", + "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" + }, "entities": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", @@ -13747,34 +1282,206 @@ "defer-to-connect": "^1.0.1" } }, + "@types/body-parser": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", + "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "requires": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "@types/connect": { + "version": "3.4.35", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", + "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "requires": { + "@types/node": "*" + } + }, + "@types/connect-history-api-fallback": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz", + "integrity": "sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==", + "requires": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "@types/express": { + "version": "4.17.13", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.13.tgz", + "integrity": "sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA==", + "requires": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.18", + "@types/qs": "*", + 
"@types/serve-static": "*" + } + }, + "@types/express-serve-static-core": { + "version": "4.17.28", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.28.tgz", + "integrity": "sha512-P1BJAEAW3E2DJUlkgq4tOL3RyMunoWXqbSCygWo5ZIWTjUgN1YnaXWW4VWl/oc8vs/XoYibEGBKP0uZyF4AHig==", + "requires": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*" + } + }, "@types/glob": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", - "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==", "requires": { "@types/minimatch": "*", "@types/node": "*" } }, + "@types/highlight.js": { + "version": "9.12.4", + "resolved": "https://registry.npmjs.org/@types/highlight.js/-/highlight.js-9.12.4.tgz", + "integrity": "sha512-t2szdkwmg2JJyuCM20e8kR2X59WCE5Zkl4bzm1u1Oukjm79zpbiAv+QjnwLnuuV0WHEcX2NgUItu0pAMKuOPww==" + }, + "@types/http-proxy": { + "version": "1.17.8", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.8.tgz", + "integrity": "sha512-5kPLG5BKpWYkw/LVOGWpiq3nEVqxiN32rTgI53Sk12/xHFQ2rG3ehI9IO+O3W2QoKeyB92dJkoka8SUm6BX1pA==", + "requires": { + "@types/node": "*" + } + }, "@types/json-schema": { - "version": "7.0.7", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.7.tgz", - "integrity": "sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA==" + "version": "7.0.9", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.9.tgz", + "integrity": "sha512-qcUXuemtEu+E5wZSJHNxUXeCZhAfXKQ41D+duX+VYPde7xyEVZci+/oXKJL13tnRs9lR2pr4fod59GT6/X1/yQ==" + }, + "@types/linkify-it": { + "version": "3.0.2", + 
"resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.2.tgz", + "integrity": "sha512-HZQYqbiFVWufzCwexrvh694SOim8z2d+xJl5UNamcvQFejLY/2YUtzXHYi3cHdI7PMlS8ejH2slRAOJQ32aNbA==" + }, + "@types/markdown-it": { + "version": "10.0.3", + "resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-10.0.3.tgz", + "integrity": "sha512-daHJk22isOUvNssVGF2zDnnSyxHhFYhtjeX4oQaKD6QzL3ZR1QSgiD1g+Q6/WSWYVogNXYDXODtbgW/WiFCtyw==", + "requires": { + "@types/highlight.js": "^9.7.0", + "@types/linkify-it": "*", + "@types/mdurl": "*", + "highlight.js": "^9.7.0" + } + }, + "@types/mdurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@types/mdurl/-/mdurl-1.0.2.tgz", + "integrity": "sha512-eC4U9MlIcu2q0KQmXszyn5Akca/0jrQmwDRgpAMJai7qBWq4amIQhZyNau4VYGtCeALvW1/NtjzJJ567aZxfKA==" + }, + "@types/mime": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", + "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==" }, "@types/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA==" + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.5.tgz", + "integrity": "sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ==" }, "@types/node": { - "version": "14.14.37", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.37.tgz", - "integrity": "sha512-XYmBiy+ohOR4Lh5jE379fV2IU+6Jn4g5qASinhitfyO71b/sCo6MKsMLF5tc7Zf2CE8hViVQyYSobJNke8OvUw==" + "version": "17.0.10", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz", + "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==" }, "@types/q": { - "version": "1.5.4", - 
"resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", - "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.5.tgz", + "integrity": "sha512-L28j2FcJfSZOnL1WBjDYp2vUHCeIFlyYI/53EwD/rKUBQ7MtUUfbQWiyKJGpcnv4/WgrhWsFKrcPstcAt/J0tQ==" + }, + "@types/qs": { + "version": "6.9.7", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", + "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" + }, + "@types/range-parser": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", + "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" + }, + "@types/serve-static": { + "version": "1.13.10", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz", + "integrity": "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==", + "requires": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "@types/source-list-map": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@types/source-list-map/-/source-list-map-0.1.2.tgz", + "integrity": "sha512-K5K+yml8LTo9bWJI/rECfIPrGgxdpeNbj+d53lwN4QjW1MCwlkhUms+gtdzigTeUyBr09+u8BwOIY3MXvHdcsA==" + }, + "@types/tapable": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/tapable/-/tapable-1.0.8.tgz", + "integrity": "sha512-ipixuVrh2OdNmauvtT51o3d8z12p6LtFW9in7U79der/kwejjdNchQC5UMn5u/KxNoM7VHHOs/l8KS8uHxhODQ==" + }, + "@types/uglify-js": { + "version": "3.13.1", + "resolved": "https://registry.npmjs.org/@types/uglify-js/-/uglify-js-3.13.1.tgz", + "integrity": "sha512-O3MmRAk6ZuAKa9CHgg0Pr0+lUOqoMLpc9AS4R8ano2auvsg7IE8syF3Xh/NPr26TWklxYcqoEEFdzLLs1fV9PQ==", + "requires": { + "source-map": "^0.6.1" + }, + 
"dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + } + } + }, + "@types/webpack": { + "version": "4.41.32", + "resolved": "https://registry.npmjs.org/@types/webpack/-/webpack-4.41.32.tgz", + "integrity": "sha512-cb+0ioil/7oz5//7tZUSwbrSAN/NWHrQylz5cW8G0dWTcF/g+/dSdMlKVZspBYuMAN1+WnwHrkxiRrLcwd0Heg==", + "requires": { + "@types/node": "*", + "@types/tapable": "^1", + "@types/uglify-js": "*", + "@types/webpack-sources": "*", + "anymatch": "^3.0.0", + "source-map": "^0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + } + } + }, + "@types/webpack-dev-server": { + "version": "3.11.6", + "resolved": "https://registry.npmjs.org/@types/webpack-dev-server/-/webpack-dev-server-3.11.6.tgz", + "integrity": "sha512-XCph0RiiqFGetukCTC3KVnY1jwLcZ84illFRMbyFzCcWl90B/76ew0tSqF46oBhnLC4obNDG7dMO0JfTN0MgMQ==", + "requires": { + "@types/connect-history-api-fallback": "*", + "@types/express": "*", + "@types/serve-static": "*", + "@types/webpack": "^4", + "http-proxy-middleware": "^1.0.0" + } + }, + "@types/webpack-sources": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@types/webpack-sources/-/webpack-sources-3.2.0.tgz", + "integrity": "sha512-Ft7YH3lEVRQ6ls8k4Ff1oB4jN6oy/XmU6tQISKdhfh+1mR+viZFphS6WL0IrtDOzvefmJg5a0s7ZQoRXwqTEFg==", + "requires": { + "@types/node": "*", + "@types/source-list-map": "*", + "source-map": "^0.7.3" + } }, "@vue/babel-helper-vue-jsx-merge-props": { "version": "1.2.1", @@ -13787,9 +1494,9 @@ "integrity": "sha512-hz4R8tS5jMn8lDq6iD+yWL6XNB699pGIVLk7WSJnn1dbpjaazsjZQkieJoRX6gW5zpYSCFqQ7jUquPNY65tQYA==" }, "@vue/babel-plugin-jsx": { - 
"version": "1.0.4", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.4.tgz", - "integrity": "sha512-Vu5gsabUdsiWc4vQarg46xWJGs8pMEJyyMQAKA1vO+F4+aR4/jaxWxPCOvZ7XvVyy+ecSbwQp/qIyDVje360UQ==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.1.1.tgz", + "integrity": "sha512-j2uVfZjnB5+zkcbc/zsOc0fSNGCMMjaEXP52wdwdIfn0qjFfEYpYZBFKFg+HHnQeJCVrjOeO0YxgaL7DMrym9w==", "requires": { "@babel/helper-module-imports": "^7.0.0", "@babel/plugin-syntax-jsx": "^7.0.0", @@ -13823,9 +1530,9 @@ } }, "@vue/babel-preset-app": { - "version": "4.5.12", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.12.tgz", - "integrity": "sha512-8q67ORQ9O0Ms0nlqsXTVhaBefRBaLrzPxOewAZhdcO7onHwcO5/wRdWtHhZgfpCZlhY7NogkU16z3WnorSSkEA==", + "version": "4.5.15", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.15.tgz", + "integrity": "sha512-J+YttzvwRfV1BPczf8r3qCevznYk+jh531agVF+5EYlHF4Sgh/cGXTz9qkkiux3LQgvhEGXgmCteg1n38WuuKg==", "requires": { "@babel/core": "^7.11.0", "@babel/helper-compilation-targets": "^7.9.6", @@ -13935,17 +1642,17 @@ } }, "@vue/component-compiler-utils": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz", - "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.3.0.tgz", + "integrity": "sha512-97sfH2mYNU+2PzGrmK2haqffDpVASuib9/w2/noxiFi31Z54hW+q3izKQXXQZSNhtiUpAI36uSuYepeBe4wpHQ==", "requires": { "consolidate": "^0.15.1", "hash-sum": "^1.0.2", "lru-cache": "^4.1.2", "merge-source-map": "^1.1.0", - "postcss": "^7.0.14", + "postcss": "^7.0.36", "postcss-selector-parser": "^6.0.2", - "prettier": "^1.18.2", + "prettier": "^1.18.2 || ^2.0.0", "source-map": 
"~0.6.1", "vue-template-es2015-compiler": "^1.9.0" }, @@ -13972,19 +1679,21 @@ } }, "@vuepress/core": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.8.2.tgz", - "integrity": "sha512-lh9BLC06k9s0wxTuWtCkiNj49fkbW87enp0XSrFZHEoyDGSGndQjZmMMErcHc5Hx7nrW1nzc33sPH1NNtJl0hw==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.9.7.tgz", + "integrity": "sha512-u5eb1mfNLV8uG2UuxlvpB/FkrABxeMHqymTsixOnsOg2REziv9puEIbqaZ5BjLPvbCDvSj6rn+DwjENmBU+frQ==", "requires": { "@babel/core": "^7.8.4", "@vue/babel-preset-app": "^4.1.2", - "@vuepress/markdown": "1.8.2", - "@vuepress/markdown-loader": "1.8.2", - "@vuepress/plugin-last-updated": "1.8.2", - "@vuepress/plugin-register-components": "1.8.2", - "@vuepress/shared-utils": "1.8.2", + "@vuepress/markdown": "1.9.7", + "@vuepress/markdown-loader": "1.9.7", + "@vuepress/plugin-last-updated": "1.9.7", + "@vuepress/plugin-register-components": "1.9.7", + "@vuepress/shared-utils": "1.9.7", + "@vuepress/types": "1.9.7", "autoprefixer": "^9.5.1", "babel-loader": "^8.0.4", + "bundle-require": "2.1.8", "cache-loader": "^3.0.0", "chokidar": "^2.0.3", "connect-history-api-fallback": "^1.5.0", @@ -13992,6 +1701,7 @@ "core-js": "^3.6.4", "cross-spawn": "^6.0.5", "css-loader": "^2.1.1", + "esbuild": "0.14.7", "file-loader": "^3.0.1", "js-yaml": "^3.13.1", "lru-cache": "^5.1.1", @@ -14017,11 +1727,11 @@ } }, "@vuepress/markdown": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.8.2.tgz", - "integrity": "sha512-zznBHVqW+iBkznF/BO/GY9RFu53khyl0Ey0PnGqvwCJpRLNan6y5EXgYumtjw2GSYn5nDTTALYxtyNBdz64PKg==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.9.7.tgz", + "integrity": "sha512-DFOjYkwV6fT3xXTGdTDloeIrT1AbwJ9pwefmrp0rMgC6zOz3XUJn6qqUwcYFO5mNBWpbiFQ3JZirCtgOe+xxBA==", "requires": { - "@vuepress/shared-utils": "1.8.2", + "@vuepress/shared-utils": "1.9.7", "markdown-it": 
"^8.4.1", "markdown-it-anchor": "^5.0.2", "markdown-it-chain": "^1.3.0", @@ -14050,61 +1760,68 @@ } }, "@vuepress/markdown-loader": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.8.2.tgz", - "integrity": "sha512-mWzFXikCUcAN/chpKkqZpRYKdo0312hMv8cBea2hvrJYV6y4ODB066XKvXN8JwOcxuCjxWYJkhWGr+pXq1oTtw==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.9.7.tgz", + "integrity": "sha512-mxXF8FtX/QhOg/UYbe4Pr1j5tcf/aOEI502rycTJ3WF2XAtOmewjkGV4eAA6f6JmuM/fwzOBMZKDyy9/yo2I6Q==", "requires": { - "@vuepress/markdown": "1.8.2", + "@vuepress/markdown": "1.9.7", "loader-utils": "^1.1.0", "lru-cache": "^5.1.1" } }, "@vuepress/plugin-active-header-links": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.8.2.tgz", - "integrity": "sha512-JmXAQg8D7J8mcKe2Ue3BZ9dOCzJMJXP4Cnkkc/IrqfDg0ET0l96gYWZohCqlvRIWt4f0VPiFAO4FLYrW+hko+g==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.9.7.tgz", + "integrity": "sha512-G1M8zuV9Og3z8WBiKkWrofG44NEXsHttc1MYreDXfeWh/NLjr9q1GPCEXtiCjrjnHZHB3cSQTKnTqAHDq35PGA==", "requires": { + "@vuepress/types": "1.9.7", "lodash.debounce": "^4.0.8" } }, "@vuepress/plugin-google-analytics": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz", - "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ==" + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.8.2.tgz", + "integrity": "sha512-BMFayLzT2BvXmnhM9mDHw0UPU7J0pH1X9gQA4HmZxOf7f3+atK5eJGsc1Ia/+1FTG2ESvhFLUU/CC3h5arjEJw==" }, "@vuepress/plugin-last-updated": { - "version": "1.8.2", - "resolved": 
"https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.8.2.tgz", - "integrity": "sha512-pYIRZi52huO9b6HY3JQNPKNERCLzMHejjBRt9ekdnJ1xhLs4MmRvt37BoXjI/qzvXkYtr7nmGgnKThNBVRTZuA==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.9.7.tgz", + "integrity": "sha512-FiFBOl49dlFRjbLRnRAv77HDWfe+S/eCPtMQobq4/O3QWuL3Na5P4fCTTVzq1K7rWNO9EPsWNB2Jb26ndlQLKQ==", "requires": { + "@vuepress/types": "1.9.7", "cross-spawn": "^6.0.5" } }, "@vuepress/plugin-nprogress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.8.2.tgz", - "integrity": "sha512-3TOBee2NM3WLr1tdjDTGfrAMggjN+OlEPyKyv8FqThsVkDYhw48O3HwqlThp9KX7UbL3ExxIFBwWRFLC+kYrdw==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.9.7.tgz", + "integrity": "sha512-sI148igbdRfLgyzB8PdhbF51hNyCDYXsBn8bBWiHdzcHBx974sVNFKtfwdIZcSFsNrEcg6zo8YIrQ+CO5vlUhQ==", "requires": { + "@vuepress/types": "1.9.7", "nprogress": "^0.2.0" } }, "@vuepress/plugin-register-components": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.8.2.tgz", - "integrity": "sha512-6SUq3nHFMEh9qKFnjA8QnrNxj0kLs7+Gspq1OBU8vtu0NQmSvLFZVaMV7pzT/9zN2nO5Pld5qhsUJv1g71MrEA==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.9.7.tgz", + "integrity": "sha512-l/w1nE7Dpl+LPMb8+AHSGGFYSP/t5j6H4/Wltwc2QcdzO7yqwC1YkwwhtTXvLvHOV8O7+rDg2nzvq355SFkfKA==", "requires": { - "@vuepress/shared-utils": "1.8.2" + "@vuepress/shared-utils": "1.9.7", + "@vuepress/types": "1.9.7" } }, "@vuepress/plugin-search": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.8.2.tgz", - "integrity": 
"sha512-JrSJr9o0Kar14lVtZ4wfw39pplxvvMh8vDBD9oW09a+6Zi/4bySPGdcdaqdqGW+OHSiZNvG+6uyfKSBBBqF6PA==" + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.9.7.tgz", + "integrity": "sha512-MLpbUVGLxaaHEwflFxvy0pF9gypFVUT3Q9Zc6maWE+0HDWAvzMxo6GBaj6mQPwjOqNQMf4QcN3hDzAZktA+DQg==", + "requires": { + "@vuepress/types": "1.9.7" + } }, "@vuepress/shared-utils": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.8.2.tgz", - "integrity": "sha512-6kGubc7iBDWruEBUU7yR+sQ++SOhMuvKWvWeTZJKRZedthycdzYz7QVpua0FaZSAJm5/dIt8ymU4WQvxTtZgTQ==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.9.7.tgz", + "integrity": "sha512-lIkO/eSEspXgVHjYHa9vuhN7DuaYvkfX1+TTJDiEYXIwgwqtvkTv55C+IOdgswlt0C/OXDlJaUe1rGgJJ1+FTw==", "requires": { "chalk": "^2.3.2", "escape-html": "^1.0.3", @@ -14118,19 +1835,52 @@ } }, "@vuepress/theme-default": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.8.2.tgz", - "integrity": "sha512-rE7M1rs3n2xp4a/GrweO8EGwqFn3EA5gnFWdVmVIHyr7C1nix+EqjpPQF1SVWNnIrDdQuCw38PqS+oND1K2vYw==", - "requires": { - "@vuepress/plugin-active-header-links": "1.8.2", - "@vuepress/plugin-nprogress": "1.8.2", - "@vuepress/plugin-search": "1.8.2", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.9.7.tgz", + "integrity": "sha512-NZzCLIl+bgJIibhkqVmk/NSku57XIuXugxAN3uiJrCw6Mu6sb3xOvbk0En3k+vS2BKHxAZ6Cx7dbCiyknDQnSA==", + "requires": { + "@vuepress/plugin-active-header-links": "1.9.7", + "@vuepress/plugin-nprogress": "1.9.7", + "@vuepress/plugin-search": "1.9.7", + "@vuepress/types": "1.9.7", "docsearch.js": "^2.5.2", "lodash": "^4.17.15", "stylus": "^0.54.8", "stylus-loader": "^3.0.2", "vuepress-plugin-container": "^2.0.2", "vuepress-plugin-smooth-scroll": "^0.0.3" + }, + "dependencies": { + "mkdirp": { + "version": 
"1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" + }, + "stylus": { + "version": "0.54.8", + "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz", + "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==", + "requires": { + "css-parse": "~2.0.0", + "debug": "~3.1.0", + "glob": "^7.1.6", + "mkdirp": "~1.0.4", + "safer-buffer": "^2.1.2", + "sax": "~1.2.4", + "semver": "^6.3.0", + "source-map": "^0.7.3" + } + } + } + }, + "@vuepress/types": { + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@vuepress/types/-/types-1.9.7.tgz", + "integrity": "sha512-moLQzkX3ED2o18dimLemUm7UVDKxhcrJmGt5C0Ng3xxrLPaQu7UqbROtEKB3YnMRt4P/CA91J+Ck+b9LmGabog==", + "requires": { + "@types/markdown-it": "^10.0.0", + "@types/webpack-dev-server": "^3", + "webpack-chain": "^6.0.0" } }, "@webassemblyjs/ast": { @@ -14346,24 +2096,24 @@ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" }, "algoliasearch": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.8.6.tgz", - "integrity": "sha512-G8IA3lcgaQB4r9HuQ4G+uSFjjz0Wv2OgEPiQ8emA+G2UUlroOfMl064j1bq/G+QTW0LmTQp9JwrFDRWxFM9J7w==", - "requires": { - "@algolia/cache-browser-local-storage": "4.8.6", - "@algolia/cache-common": "4.8.6", - "@algolia/cache-in-memory": "4.8.6", - "@algolia/client-account": "4.8.6", - "@algolia/client-analytics": "4.8.6", - "@algolia/client-common": "4.8.6", - "@algolia/client-recommendation": "4.8.6", - "@algolia/client-search": "4.8.6", - "@algolia/logger-common": "4.8.6", - "@algolia/logger-console": "4.8.6", - "@algolia/requester-browser-xhr": "4.8.6", - "@algolia/requester-common": "4.8.6", - "@algolia/requester-node-http": "4.8.6", - "@algolia/transporter": "4.8.6" + "version": "4.12.0", + 
"resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.12.0.tgz", + "integrity": "sha512-fZOMMm+F3Bi5M/MoFIz7hiuyCitJza0Hu+r8Wzz4LIQClC6YGMRq7kT6NNU1fSSoFDSeJIwMfedbbi5G9dJoVQ==", + "requires": { + "@algolia/cache-browser-local-storage": "4.12.0", + "@algolia/cache-common": "4.12.0", + "@algolia/cache-in-memory": "4.12.0", + "@algolia/client-account": "4.12.0", + "@algolia/client-analytics": "4.12.0", + "@algolia/client-common": "4.12.0", + "@algolia/client-personalization": "4.12.0", + "@algolia/client-search": "4.12.0", + "@algolia/logger-common": "4.12.0", + "@algolia/logger-console": "4.12.0", + "@algolia/requester-browser-xhr": "4.12.0", + "@algolia/requester-common": "4.12.0", + "@algolia/requester-node-http": "4.12.0", + "@algolia/transporter": "4.12.0" } }, "alphanum-sort": { @@ -14372,11 +2122,46 @@ "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM=" }, "ansi-align": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz", - "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", "requires": { - "string-width": "^3.0.0" + "string-width": "^4.1.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "requires": { + "ansi-regex": "^5.0.1" + } + } } }, "ansi-colors": { @@ -14392,10 +2177,10 @@ "type-fest": "^0.21.3" } }, - "ansi-html": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ansi-html/-/ansi-html-0.0.7.tgz", - "integrity": "sha1-gTWEAhliqenm/QOflA0S9WynhZ4=" + "ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==" }, "ansi-regex": { "version": "2.1.1", @@ -14411,22 +2196,12 @@ } }, "anymatch": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", - "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", "requires": { - "micromatch": "^3.1.4", - "normalize-path": "^2.1.1" - }, - "dependencies": { - "normalize-path": { - "version": "2.1.1", - "resolved": 
"https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", - "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", - "requires": { - "remove-trailing-separator": "^1.0.1" - } - } + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" } }, "aproba": { @@ -14486,9 +2261,9 @@ "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY=" }, "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", "requires": { "safer-buffer": "~2.1.0" } @@ -14587,17 +2362,24 @@ } }, "autoprefixer": { - "version": "9.8.6", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz", - "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==", + "version": "9.8.8", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.8.tgz", + "integrity": "sha512-eM9d/swFopRt5gdJ7jrpCwgvEMIayITpojhkkSMRsFHYuH5bkSQ4p/9qTEHtmNudUZh22Tehu7I6CxAW0IXTKA==", "requires": { "browserslist": "^4.12.0", "caniuse-lite": "^1.0.30001109", - "colorette": "^1.2.1", "normalize-range": "^0.1.2", "num2fraction": "^1.2.2", + "picocolors": "^0.2.1", "postcss": "^7.0.32", "postcss-value-parser": "^4.1.0" + }, + "dependencies": { + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + } } }, "aws-sign2": { @@ -14611,24 +2393,24 @@ "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" }, "axios": { - "version": "0.21.1", - "resolved": 
"https://registry.npmjs.org/axios/-/axios-0.21.1.tgz", - "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==", + "version": "0.24.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.24.0.tgz", + "integrity": "sha512-Q6cWsys88HoPgAaFAVUb0WpPk0O8iTeisR9IMqy9G8AbO4NlpVknrnQS03zzF9PGAWgO3cgletO3VjV/P7VztA==", "requires": { - "follow-redirects": "^1.10.0" + "follow-redirects": "^1.14.4" }, "dependencies": { "follow-redirects": { - "version": "1.13.3", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.3.tgz", - "integrity": "sha512-DUgl6+HDzB0iEptNQEXLx/KhTmDb8tZUHSeLqpnjpknR70H0nC2t9N73BK6fN4hOvJ84pKlIQVQ4k5FFlBedKA==" + "version": "1.14.7", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz", + "integrity": "sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==" } } }, "babel-loader": { - "version": "8.2.2", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz", - "integrity": "sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==", + "version": "8.2.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.3.tgz", + "integrity": "sha512-n4Zeta8NC3QAsuyiizu0GkmRcQ6clkV9WFUnUf1iXP//IeSKbWjofW3UHyZVwlOB4y039YQKefawyTn64Zwbuw==", "requires": { "find-cache-dir": "^3.3.1", "loader-utils": "^1.4.0", @@ -14645,30 +2427,30 @@ } }, "babel-plugin-polyfill-corejs2": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz", - "integrity": "sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg==", + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz", + "integrity": 
"sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w==", "requires": { "@babel/compat-data": "^7.13.11", - "@babel/helper-define-polyfill-provider": "^0.2.0", + "@babel/helper-define-polyfill-provider": "^0.3.1", "semver": "^6.1.1" } }, "babel-plugin-polyfill-corejs3": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz", - "integrity": "sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg==", + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.1.tgz", + "integrity": "sha512-TihqEe4sQcb/QcPJvxe94/9RZuLQuF1+To4WqQcRvc+3J3gLCPIPgDKzGLG6zmQLfH3nn25heRuDNkS2KR4I8A==", "requires": { - "@babel/helper-define-polyfill-provider": "^0.2.0", - "core-js-compat": "^3.9.1" + "@babel/helper-define-polyfill-provider": "^0.3.1", + "core-js-compat": "^3.20.0" } }, "babel-plugin-polyfill-regenerator": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz", - "integrity": "sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg==", + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz", + "integrity": "sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A==", "requires": { - "@babel/helper-define-polyfill-provider": "^0.2.0" + "@babel/helper-define-polyfill-provider": "^0.3.1" } }, "babel-walk": { @@ -14782,26 +2564,26 @@ "integrity": "sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw==" }, "body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": 
"sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.1.tgz", + "integrity": "sha512-8ljfQi5eBk8EJfECMrgqNGWPEY5jWP+1IzkzkGdFFEwFQZZyaZ21UqdaHktgiMlH0xLHqIFtE/u2OYE5dOtViA==", "requires": { - "bytes": "3.1.0", + "bytes": "3.1.1", "content-type": "~1.0.4", "debug": "2.6.9", "depd": "~1.1.2", - "http-errors": "1.7.2", + "http-errors": "1.8.1", "iconv-lite": "0.4.24", "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" + "qs": "6.9.6", + "raw-body": "2.4.2", + "type-is": "~1.6.18" }, "dependencies": { "bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.1.tgz", + "integrity": "sha512-dWe4nWO/ruEOY7HkUJ5gFt1DCFV9zPRoJr8pV0/ASQermOZjtq8jMjOprC0Kd10GLN+l7xaUPvxzJFWtxGu8Fg==" }, "debug": { "version": "2.6.9", @@ -14847,9 +2629,9 @@ }, "dependencies": { "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" }, "ansi-styles": { "version": "4.3.0", @@ -14902,21 +2684,21 @@ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" }, "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": 
"sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "requires": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" + "strip-ansi": "^6.0.1" } }, "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "ansi-regex": "^5.0.0" + "ansi-regex": "^5.0.1" } }, "supports-color": { @@ -15050,27 +2832,15 @@ } }, "browserslist": { - "version": "4.16.6", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.6.tgz", - "integrity": "sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ==", + "version": "4.19.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.19.1.tgz", + "integrity": "sha512-u2tbbG5PdKRTUoctO3NBD8FQ5HdPh1ZXPHzp1rwaa5jTc+RV9/+RlWiAIKmjRPQF+xbGM9Kklj5bZQFa2s/38A==", "requires": { - "caniuse-lite": "^1.0.30001219", - "colorette": "^1.2.2", - "electron-to-chromium": "^1.3.723", + "caniuse-lite": "^1.0.30001286", + "electron-to-chromium": "^1.4.17", "escalade": "^3.1.1", - "node-releases": "^1.1.71" - }, - "dependencies": { - "caniuse-lite": { - "version": "1.0.30001228", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz", - "integrity": "sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A==" - }, - 
"electron-to-chromium": { - "version": "1.3.738", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.738.tgz", - "integrity": "sha512-vCMf4gDOpEylPSLPLSwAEsz+R3ShP02Y3cAKMZvTqule3XcPp7tgc/0ESI7IS6ZeyBlGClE50N53fIOkcIVnpw==" - } + "node-releases": "^2.0.1", + "picocolors": "^1.0.0" } }, "buffer": { @@ -15084,9 +2854,9 @@ } }, "buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" }, "buffer-indexof": { "version": "1.1.1", @@ -15108,15 +2878,20 @@ "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=" }, + "bundle-require": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/bundle-require/-/bundle-require-2.1.8.tgz", + "integrity": "sha512-oOEg3A0hy/YzvNWNowtKD0pmhZKseOFweCbgyMqTIih4gRY1nJWsvrOCT27L9NbIyL5jMjTFrAUpGxxpW68Puw==" + }, "bytes": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "cac": { - "version": "6.7.2", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.2.tgz", - "integrity": "sha512-w0bH1IF9rEjdi0a6lTtlXYT+vBZEJL9oytaXXRdsD68MH6+SrZGOGsu7s2saHQvYXqwo/wBdkW75tt8wFpj+mw==" + "version": "6.7.12", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.12.tgz", + "integrity": "sha512-rM7E2ygtMkJqD9c7WnFU6fruFcN3xe4FM5yUmgxhZzIKJk4uHl9U/fhwdajGFQbQuv43FAUo1Fe8gX/oIKDeSA==" }, "cacache": { "version": "12.0.4", @@ -15138,16 +2913,6 @@ "ssri": "^6.0.1", "unique-filename": "^1.1.1", "y18n": "^4.0.0" - }, - "dependencies": { - "mkdirp": { 
- "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - } } }, "cache-base": { @@ -15215,14 +2980,6 @@ "semver": "^5.6.0" } }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - }, "p-locate": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", @@ -15289,9 +3046,9 @@ "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" }, "normalize-url": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz", - "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==" + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", + "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==" } } }, @@ -15340,9 +3097,9 @@ } }, "camelcase": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", - "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==" + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==" }, "caniuse-api": { "version": "3.0.0", @@ -15356,9 +3113,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001208", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001208.tgz", - "integrity": 
"sha512-OE5UE4+nBOro8Dyvv0lfx+SRtfVIOM9uhKqFmJeUbGriqhhStgp1A0OyBpgy3OUF8AhYCT+PVwPC1gMl2ZcQMA==" + "version": "1.0.30001301", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001301.tgz", + "integrity": "sha512-csfD/GpHMqgEL3V3uIgosvh+SVIQvCh43SNu9HRbP1lnxkKm1kjDG4f32PP571JplkLjfS+mg2p1gxR7MYrrIA==" }, "caseless": { "version": "0.12.0", @@ -15384,28 +3141,29 @@ } }, "cheerio": { - "version": "1.0.0-rc.6", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.6.tgz", - "integrity": "sha512-hjx1XE1M/D5pAtMgvWwE21QClmAEeGHOIDfycgmndisdNgI6PE1cGRQkMGBcsbUbmEQyWu5PJLUcAOjtQS8DWw==", + "version": "1.0.0-rc.10", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.10.tgz", + "integrity": "sha512-g0J0q/O6mW8z5zxQ3A8E8J1hUgp4SMOvEoW/x84OwyHKe/Zccz83PVT4y5Crcr530FV6NgmKI1qvGTKVl9XXVw==", "requires": { - "cheerio-select": "^1.3.0", - "dom-serializer": "^1.3.1", - "domhandler": "^4.1.0", + "cheerio-select": "^1.5.0", + "dom-serializer": "^1.3.2", + "domhandler": "^4.2.0", "htmlparser2": "^6.1.0", "parse5": "^6.0.1", - "parse5-htmlparser2-tree-adapter": "^6.0.1" + "parse5-htmlparser2-tree-adapter": "^6.0.1", + "tslib": "^2.2.0" } }, "cheerio-select": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.3.0.tgz", - "integrity": "sha512-mLgqdHxVOQyhOIkG5QnRkDg7h817Dkf0dAvlCio2TJMmR72cJKH0bF28SHXvLkVrGcGOiub0/Bs/CMnPeQO7qw==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.5.0.tgz", + "integrity": "sha512-qocaHPv5ypefh6YNxvnbABM07KMxExbtbfuJoIie3iZXX1ERwYmJcIiRrr9H05ucQP1k28dav8rpdDgjQd8drg==", "requires": { - "css-select": "^4.0.0", - "css-what": "^5.0.0", + "css-select": "^4.1.3", + "css-what": "^5.0.1", "domelementtype": "^2.2.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.2" + "domhandler": "^4.2.0", + "domutils": "^2.7.0" } }, "chokidar": { @@ -15425,6 +3183,27 @@ "path-is-absolute": "^1.0.0", "readdirp": "^2.2.1", 
"upath": "^1.1.1" + }, + "dependencies": { + "anymatch": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", + "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "requires": { + "micromatch": "^3.1.4", + "normalize-path": "^2.1.1" + }, + "dependencies": { + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "requires": { + "remove-trailing-separator": "^1.0.1" + } + } + } + } } }, "chownr": { @@ -15438,9 +3217,9 @@ "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==" }, "ci-info": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.1.1.tgz", - "integrity": "sha512-kdRWLBIJwdsYJWYJFtAFFYxybguqeF91qpZaggjG5Nf8QKdizFG2hjqvaTXbxFIcYbSaD74KpAXv6BSm17DHEQ==" + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.3.0.tgz", + "integrity": "sha512-riT/3vI5YpVH6/qomlDnJow6TBee2PBKSEpx3O32EGPYbWGIRsIlGRms3Sm74wYE1JMo8RnO04Hb12+v1J5ICw==" }, "cipher-base": { "version": "1.0.4", @@ -15473,9 +3252,9 @@ } }, "clean-css": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz", - "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==", + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.4.tgz", + "integrity": "sha512-EJUDT7nDVFDvaQgAo2G/PJvxmp1o/c6iXLbswsBbUFXi1Nr+AjA2cKmfbKDMjMvzEe75g3P6JkaDDAKk96A85A==", "requires": { "source-map": "~0.6.0" }, @@ -15492,21 +3271,10 @@ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==" }, - "clipboard": { - "version": "2.0.8", 
- "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz", - "integrity": "sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==", - "optional": true, - "requires": { - "good-listener": "^1.2.2", - "select": "^1.1.2", - "tiny-emitter": "^2.0.0" - } - }, "clipboard-copy": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", - "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-4.0.1.tgz", + "integrity": "sha512-wOlqdqziE/NNTUJsfSgXmBMIrYmfd5V0HCGsR8uAKHcg+h9NENWINcfRjtWGU77wDHC8B8ijV4hMTGYbrKovng==" }, "cliui": { "version": "5.0.0", @@ -15561,12 +3329,12 @@ } }, "color": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", - "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/color/-/color-3.2.1.tgz", + "integrity": "sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==", "requires": { - "color-convert": "^1.9.1", - "color-string": "^1.5.4" + "color-convert": "^1.9.3", + "color-string": "^1.6.0" } }, "color-convert": { @@ -15583,19 +3351,14 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "color-string": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz", - "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.0.tgz", + "integrity": "sha512-9Mrz2AQLefkH1UvASKj6v6hj/7eWgjnT/cVsR8CumieLoT+g900exWeNogqtweI8dxloXN9BDQTYro1oWu/5CQ==", "requires": { "color-name": "^1.0.0", "simple-swizzle": 
"^0.2.2" } }, - "colorette": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz", - "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w==" - }, "combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -15718,11 +3481,18 @@ "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=" }, "content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", "requires": { - "safe-buffer": "5.1.2" + "safe-buffer": "5.2.1" + }, + "dependencies": { + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + } } }, "content-type": { @@ -15731,17 +3501,17 @@ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" }, "convert-source-map": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", + "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", "requires": { "safe-buffer": "~5.1.1" } }, "cookie": { - "version": "0.4.0", - 
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==" + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.1.tgz", + "integrity": "sha512-ZwrFkGJxUR3EIoXtO+yVE69Eb7KlixbaeAWfBQB9vVsNn/o+Yw69gBWSSDK825hQNdN+wF8zELf3dFNl/kxkUA==" }, "cookie-signature": { "version": "1.0.6", @@ -15759,16 +3529,6 @@ "mkdirp": "^0.5.1", "rimraf": "^2.5.4", "run-queue": "^1.0.0" - }, - "dependencies": { - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - } } }, "copy-descriptor": { @@ -15900,16 +3660,16 @@ } }, "core-js": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.10.1.tgz", - "integrity": "sha512-pwCxEXnj27XG47mu7SXAwhLP3L5CrlvCB91ANUkIz40P27kUcvNfSdvyZJ9CLHiVoKSp+TTChMQMSKQEH/IQxA==" + "version": "3.20.3", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.20.3.tgz", + "integrity": "sha512-vVl8j8ph6tRS3B8qir40H7yw7voy17xL0piAjlbBUsH7WIfzoedL/ZOr1OV9FyZQLWXsayOJyV4tnRyXR85/ag==" }, "core-js-compat": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.10.1.tgz", - "integrity": "sha512-ZHQTdTPkqvw2CeHiZC970NNJcnwzT6YIueDMASKt+p3WbZsLXOcoD392SkcWhkC0wBBHhlfhqGKKsNCQUozYtg==", + "version": "3.20.3", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.20.3.tgz", + "integrity": "sha512-c8M5h0IkNZ+I92QhIpuSijOxGAcj3lgpsWdkCqmUTZNwidujF4r3pi6x1DCN+Vcs5qTS2XWWMfWSuCqyupX8gw==", "requires": { - "browserslist": "^4.16.3", + "browserslist": "^4.19.1", "semver": "7.0.0" }, "dependencies": { @@ -15921,9 +3681,9 @@ } }, "core-util-is": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, "cosmiconfig": { "version": "5.2.1", @@ -16020,14 +3780,13 @@ "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==" }, "css": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz", - "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/css/-/css-3.0.0.tgz", + "integrity": "sha512-DG9pFfwOrzc+hawpmqX/dHYHJG+Bsdb0klhyi1sDneOgGOXy9wQIC8hzyVp1e4NRYDBdxcylvywPkkXCHAzTyQ==", "requires": { - "inherits": "^2.0.3", + "inherits": "^2.0.4", "source-map": "^0.6.1", - "source-map-resolve": "^0.5.2", - "urix": "^0.1.0" + "source-map-resolve": "^0.6.0" }, "dependencies": { "source-map": { @@ -16097,18 +3856,48 @@ "integrity": "sha1-pGjuZnwW2BzPBcWMONKpfHgNv9Q=", "requires": { "css": "^2.0.0" + }, + "dependencies": { + "css": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz", + "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==", + "requires": { + "inherits": "^2.0.3", + "source-map": "^0.6.1", + "source-map-resolve": "^0.5.2", + "urix": "^0.1.0" + } + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": 
"sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "requires": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + } } }, "css-select": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.0.0.tgz", - "integrity": "sha512-I7favumBlDP/nuHBKLfL5RqvlvRdn/W29evvWJ+TaoGPm7QD+xSIN5eY2dyGjtkUmemh02TZrqJb4B8DWo6PoQ==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.2.1.tgz", + "integrity": "sha512-/aUslKhzkTNCQUB2qTX84lVmfia9NyjP3WpDGtj/WxhwBzWBYUV3DgUpurHTme8UTPcPlAD1DJ+b0nN/t50zDQ==", "requires": { "boolbase": "^1.0.0", - "css-what": "^5.0.0", - "domhandler": "^4.1.0", - "domutils": "^2.5.1", - "nth-check": "^2.0.0" + "css-what": "^5.1.0", + "domhandler": "^4.3.0", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" } }, "css-select-base-adapter": { @@ -16133,9 +3922,9 @@ } }, "css-what": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.0.0.tgz", - "integrity": "sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA==" + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.1.0.tgz", + "integrity": "sha512-arSMRWIIFY0hV8pIxZMEfmMI47Wj3R/aWpZDDxWYCPEiOMv6tfOrnpDtgxBYPEQD4V0Y/958+1TdC3iWTFcUPw==" }, "cssesc": { "version": "3.0.0", @@ -16408,12 +4197,6 @@ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" }, - "delegate": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz", - "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==", - "optional": true - }, "depd": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", @@ -16434,9 +4217,9 @@ 
"integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" }, "detect-node": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.5.tgz", - "integrity": "sha512-qi86tE6hRcFHy8jI1m2VG+LaPUR1LhqDa5G8tVjuUXmOrpuAgqsA1pN0+ldgr3aKUH+QLI9hCY/OcRYisERejw==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" }, "diffie-hellman": { "version": "5.0.3", @@ -16560,13 +4343,20 @@ } }, "dom-serializer": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.1.tgz", - "integrity": "sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.2.tgz", + "integrity": "sha512-5c54Bk5Dw4qAxNOI1pFEizPSjVsx5+bpJKmL2kPn8JhBUq2q09tTCa3mjijun2NfK78NMouDYNMBkOrPZiS+ig==", "requires": { "domelementtype": "^2.0.1", - "domhandler": "^4.0.0", + "domhandler": "^4.2.0", "entities": "^2.0.0" + }, + "dependencies": { + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" + } } }, "dom-walk": { @@ -16585,21 +4375,21 @@ "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" }, "domhandler": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.1.0.tgz", - "integrity": "sha512-/6/kmsGlMY4Tup/nGVutdrK9yQi4YjWVcVeoQmixpzjOUK1U7pQkvAPHBJeUxOgxF0J8f8lwCJSlCfD0V4CMGQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.0.tgz", + "integrity": 
"sha512-fC0aXNQXqKSFTr2wDNZDhsEYjCiYsDWl3D01kwt25hm1YIPyDGHvvi3rw+PLqHAl/m71MaiF7d5zvBr0p5UB2g==", "requires": { "domelementtype": "^2.2.0" } }, "domutils": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.5.2.tgz", - "integrity": "sha512-MHTthCb1zj8f1GVfRpeZUbohQf/HdBos0oX5gZcQFepOZPLLRyj6Wn7XS7EMnY7CVpwv8863u2vyE83Hfu28HQ==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", "requires": { "dom-serializer": "^1.0.1", "domelementtype": "^2.2.0", - "domhandler": "^4.1.0" + "domhandler": "^4.2.0" } }, "dot-prop": { @@ -16640,6 +4430,11 @@ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, + "electron-to-chromium": { + "version": "1.4.51", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.51.tgz", + "integrity": "sha512-JNEmcYl3mk1tGQmy0EvL5eik/CKSBuzAyGP0QFdG6LIgxQe3II0BL1m2zKc2MZMf3uGqHWE1TFddJML0RpjSHQ==" + }, "elliptic": { "version": "6.5.4", "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz", @@ -16706,9 +4501,9 @@ } }, "entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-3.0.1.tgz", + "integrity": "sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==" }, "envify": { "version": "4.1.0", @@ -16741,26 +4536,30 @@ } }, "es-abstract": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz", - "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==", 
+ "version": "1.19.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.19.1.tgz", + "integrity": "sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w==", "requires": { "call-bind": "^1.0.2", "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "get-intrinsic": "^1.1.1", + "get-symbol-description": "^1.0.0", "has": "^1.0.3", "has-symbols": "^1.0.2", - "is-callable": "^1.2.3", + "internal-slot": "^1.0.3", + "is-callable": "^1.2.4", "is-negative-zero": "^2.0.1", - "is-regex": "^1.1.2", - "is-string": "^1.0.5", - "object-inspect": "^1.9.0", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.1", + "is-string": "^1.0.7", + "is-weakref": "^1.0.1", + "object-inspect": "^1.11.0", "object-keys": "^1.1.1", "object.assign": "^4.1.2", "string.prototype.trimend": "^1.0.4", "string.prototype.trimstart": "^1.0.4", - "unbox-primitive": "^1.0.0" + "unbox-primitive": "^1.0.1" } }, "es-to-primitive": { @@ -16778,6 +4577,132 @@ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, + "esbuild": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.14.7.tgz", + "integrity": "sha512-+u/msd6iu+HvfysUPkZ9VHm83LImmSNnecYPfFI01pQ7TTcsFR+V0BkybZX7mPtIaI7LCrse6YRj+v3eraJSgw==", + "requires": { + "esbuild-android-arm64": "0.14.7", + "esbuild-darwin-64": "0.14.7", + "esbuild-darwin-arm64": "0.14.7", + "esbuild-freebsd-64": "0.14.7", + "esbuild-freebsd-arm64": "0.14.7", + "esbuild-linux-32": "0.14.7", + "esbuild-linux-64": "0.14.7", + "esbuild-linux-arm": "0.14.7", + "esbuild-linux-arm64": "0.14.7", + "esbuild-linux-mips64le": "0.14.7", + "esbuild-linux-ppc64le": "0.14.7", + "esbuild-netbsd-64": "0.14.7", + "esbuild-openbsd-64": "0.14.7", + "esbuild-sunos-64": "0.14.7", + "esbuild-windows-32": "0.14.7", + "esbuild-windows-64": "0.14.7", + 
"esbuild-windows-arm64": "0.14.7" + } + }, + "esbuild-android-arm64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-android-arm64/-/esbuild-android-arm64-0.14.7.tgz", + "integrity": "sha512-9/Q1NC4JErvsXzJKti0NHt+vzKjZOgPIjX/e6kkuCzgfT/GcO3FVBcGIv4HeJG7oMznE6KyKhvLrFgt7CdU2/w==", + "optional": true + }, + "esbuild-darwin-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-darwin-64/-/esbuild-darwin-64-0.14.7.tgz", + "integrity": "sha512-Z9X+3TT/Xj+JiZTVlwHj2P+8GoiSmUnGVz0YZTSt8WTbW3UKw5Pw2ucuJ8VzbD2FPy0jbIKJkko/6CMTQchShQ==", + "optional": true + }, + "esbuild-darwin-arm64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.7.tgz", + "integrity": "sha512-68e7COhmwIiLXBEyxUxZSSU0akgv8t3e50e2QOtKdBUE0F6KIRISzFntLe2rYlNqSsjGWsIO6CCc9tQxijjSkw==", + "optional": true + }, + "esbuild-freebsd-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.7.tgz", + "integrity": "sha512-76zy5jAjPiXX/S3UvRgG85Bb0wy0zv/J2lel3KtHi4V7GUTBfhNUPt0E5bpSXJ6yMT7iThhnA5rOn+IJiUcslQ==", + "optional": true + }, + "esbuild-freebsd-arm64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.7.tgz", + "integrity": "sha512-lSlYNLiqyzd7qCN5CEOmLxn7MhnGHPcu5KuUYOG1i+t5A6q7LgBmfYC9ZHJBoYyow3u4CNu79AWHbvVLpE/VQQ==", + "optional": true + }, + "esbuild-linux-32": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-linux-32/-/esbuild-linux-32-0.14.7.tgz", + "integrity": "sha512-Vk28u409wVOXqTaT6ek0TnfQG4Ty1aWWfiysIaIRERkNLhzLhUf4i+qJBN8mMuGTYOkE40F0Wkbp6m+IidOp2A==", + "optional": true + }, + "esbuild-linux-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-linux-64/-/esbuild-linux-64-0.14.7.tgz", + "integrity": 
"sha512-+Lvz6x+8OkRk3K2RtZwO+0a92jy9si9cUea5Zoru4yJ/6EQm9ENX5seZE0X9DTwk1dxJbjmLsJsd3IoowyzgVg==", + "optional": true + }, + "esbuild-linux-arm": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-linux-arm/-/esbuild-linux-arm-0.14.7.tgz", + "integrity": "sha512-OzpXEBogbYdcBqE4uKynuSn5YSetCvK03Qv1HcOY1VN6HmReuatjJ21dCH+YPHSpMEF0afVCnNfffvsGEkxGJQ==", + "optional": true + }, + "esbuild-linux-arm64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.7.tgz", + "integrity": "sha512-kJd5beWSqteSAW086qzCEsH6uwpi7QRIpzYWHzEYwKKu9DiG1TwIBegQJmLpPsLp4v5RAFjea0JAmAtpGtRpqg==", + "optional": true + }, + "esbuild-linux-mips64le": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.7.tgz", + "integrity": "sha512-mFWpnDhZJmj/h7pxqn1GGDsKwRfqtV7fx6kTF5pr4PfXe8pIaTERpwcKkoCwZUkWAOmUEjMIUAvFM72A6hMZnA==", + "optional": true + }, + "esbuild-linux-ppc64le": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.7.tgz", + "integrity": "sha512-wM7f4M0bsQXfDL4JbbYD0wsr8cC8KaQ3RPWc/fV27KdErPW7YsqshZZSjDV0kbhzwpNNdhLItfbaRT8OE8OaKA==", + "optional": true + }, + "esbuild-netbsd-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.7.tgz", + "integrity": "sha512-J/afS7woKyzGgAL5FlgvMyqgt5wQ597lgsT+xc2yJ9/7BIyezeXutXqfh05vszy2k3kSvhLesugsxIA71WsqBw==", + "optional": true + }, + "esbuild-openbsd-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.7.tgz", + "integrity": "sha512-7CcxgdlCD+zAPyveKoznbgr3i0Wnh0L8BDGRCjE/5UGkm5P/NQko51tuIDaYof8zbmXjjl0OIt9lSo4W7I8mrw==", + "optional": true + }, + "esbuild-sunos-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-sunos-64/-/esbuild-sunos-64-0.14.7.tgz", + "integrity": 
"sha512-GKCafP2j/KUljVC3nesw1wLFSZktb2FGCmoT1+730zIF5O6hNroo0bSEofm6ZK5mNPnLiSaiLyRB9YFgtkd5Xg==", + "optional": true + }, + "esbuild-windows-32": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-windows-32/-/esbuild-windows-32-0.14.7.tgz", + "integrity": "sha512-5I1GeL/gZoUUdTPA0ws54bpYdtyeA2t6MNISalsHpY269zK8Jia/AXB3ta/KcDHv2SvNwabpImeIPXC/k0YW6A==", + "optional": true + }, + "esbuild-windows-64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-windows-64/-/esbuild-windows-64-0.14.7.tgz", + "integrity": "sha512-CIGKCFpQOSlYsLMbxt8JjxxvVw9MlF1Rz2ABLVfFyHUF5OeqHD5fPhGrCVNaVrhO8Xrm+yFmtjcZudUGr5/WYQ==", + "optional": true + }, + "esbuild-windows-arm64": { + "version": "0.14.7", + "resolved": "https://registry.npmjs.org/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.7.tgz", + "integrity": "sha512-eOs1eSivOqN7cFiRIukEruWhaCf75V0N8P0zP7dh44LIhLl8y6/z++vv9qQVbkBm5/D7M7LfCfCTmt1f1wHOCw==", + "optional": true + }, "escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", @@ -16826,9 +4751,9 @@ }, "dependencies": { "estraverse": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", - "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==" + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==" } } }, @@ -16921,16 +4846,16 @@ } }, "express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", + "version": "4.17.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.17.2.tgz", + "integrity": 
"sha512-oxlxJxcQlYwqPWKVJJtvQiwHgosH/LrLSPA+H4UxpyvSS6jC5aH+5MoHFM+KABgTOt0APue4w66Ha8jCUo9QGg==", "requires": { "accepts": "~1.3.7", "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", + "body-parser": "1.19.1", + "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.4.0", + "cookie": "0.4.1", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "~1.1.2", @@ -16944,13 +4869,13 @@ "on-finished": "~2.3.0", "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", + "proxy-addr": "~2.0.7", + "qs": "6.9.6", "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", + "safe-buffer": "5.2.1", + "send": "0.17.2", + "serve-static": "1.14.2", + "setprototypeof": "1.2.0", "statuses": "~1.5.0", "type-is": "~1.6.18", "utils-merge": "1.0.1", @@ -16969,6 +4894,11 @@ "requires": { "ms": "2.0.0" } + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" } } }, @@ -17065,9 +4995,9 @@ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" }, "faye-websocket": { - "version": "0.11.3", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.3.tgz", - "integrity": "sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==", + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", "requires": { "websocket-driver": ">=0.5.1" } @@ -17148,9 +5078,9 @@ } }, "find-cache-dir": { - "version": "3.3.1", - "resolved": 
"https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", - "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", "requires": { "commondir": "^1.0.1", "make-dir": "^3.0.2", @@ -17209,9 +5139,9 @@ } }, "forwarded": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==" }, "fragment-cache": { "version": "0.2.1", @@ -17309,6 +5239,15 @@ "pump": "^3.0.0" } }, + "get-symbol-description": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", + "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "requires": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + } + }, "get-value": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", @@ -17323,9 +5262,9 @@ } }, "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -17396,15 +5335,6 @@ "slash": "^2.0.0" } }, - "good-listener": { - "version": 
"1.2.2", - "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz", - "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=", - "optional": true, - "requires": { - "delegate": "^3.1.2" - } - }, "got": { "version": "9.6.0", "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", @@ -17429,11 +5359,11 @@ "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" }, "gray-matter": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.2.tgz", - "integrity": "sha512-7hB/+LxrOjq/dd8APlK0r24uL/67w7SkYnfwhNFwg/VDIGWGmduTDYf3WNstLW2fbbmRwrDGCVSJ2isuf2+4Hw==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", "requires": { - "js-yaml": "^3.11.0", + "js-yaml": "^3.13.1", "kind-of": "^6.0.2", "section-matter": "^1.0.0", "strip-bom-string": "^1.0.0" @@ -17489,6 +5419,14 @@ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==" }, + "has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "requires": { + "has-symbols": "^1.0.2" + } + }, "has-value": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", @@ -17574,6 +5512,11 @@ "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz", "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ==" }, + "highlight.js": { + "version": "9.18.5", + "resolved": 
"https://registry.npmjs.org/highlight.js/-/highlight.js-9.18.5.tgz", + "integrity": "sha512-a5bFyofd/BHCX52/8i8uJkjr9DYwXIPnM/plwI6W7ezItLGqzt7X2G2nXuYSfsIJdkwwj/g9DG1LkcGJI/dDoA==" + }, "hmac-drbg": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", @@ -17601,9 +5544,9 @@ } }, "hotkeys-js": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz", - "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw==" + "version": "3.8.7", + "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.7.tgz", + "integrity": "sha512-ckAx3EkUr5XjDwjEHDorHxRO2Kb7z6Z2Sxul4MbBkN8Nho7XDslQsgMJT+CiJ5Z4TgRxxvKHEpuLE3imzqy4Lg==" }, "hpack.js": { "version": "2.1.6", @@ -17659,6 +5602,13 @@ "domhandler": "^4.0.0", "domutils": "^2.5.2", "entities": "^2.0.0" + }, + "dependencies": { + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" + } } }, "http-cache-semantics": { @@ -17672,28 +5622,21 @@ "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc=" }, "http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.8.1.tgz", + "integrity": "sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==", "requires": { "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "dependencies": { - "inherits": { - "version": "2.0.3", - "resolved": 
"https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - } + "toidentifier": "1.0.1" } }, "http-parser-js": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.3.tgz", - "integrity": "sha512-t7hjvef/5HEK7RWTdUzVUhl8zkEu+LlaE0IYzdMuvbSDipxBRpOn4Uhw8ZyECEa808iVT8XCjzo6xmYt4CiLZg==" + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.5.tgz", + "integrity": "sha512-x+JVEkO2PoM8qqpbPbOL3cqHPwerep7OwzK7Ay+sMQjKzaKCqWvjoXm5tqMP9tXWWTnTzAjIhXg+J99XYuPhPA==" }, "http-proxy": { "version": "1.18.1", @@ -17706,14 +5649,55 @@ } }, "http-proxy-middleware": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz", - "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-1.3.1.tgz", + "integrity": "sha512-13eVVDYS4z79w7f1+NPllJtOQFx/FdUW4btIvVRMaRlUY9VGstAbo5MOhLEuUgZFRHn3x50ufn25zkj/boZnEg==", "requires": { - "http-proxy": "^1.17.0", - "is-glob": "^4.0.0", - "lodash": "^4.17.11", - "micromatch": "^3.1.10" + "@types/http-proxy": "^1.17.5", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "dependencies": { + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "requires": { + "fill-range": "^7.0.1" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "requires": { + "to-regex-range": 
"^5.0.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" + }, + "micromatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz", + "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==", + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.2.3" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "requires": { + "is-number": "^7.0.0" + } + } } }, "http-signature": { @@ -17894,6 +5878,16 @@ "ipaddr.js": "^1.9.0" } }, + "internal-slot": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz", + "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==", + "requires": { + "get-intrinsic": "^1.1.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" + } + }, "ip": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", @@ -17933,11 +5927,12 @@ } }, "is-arguments": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz", - "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", "requires": { - "call-bind": "^1.0.0" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" } }, "is-arrayish": { @@ -17946,9 +5941,12 @@ "integrity": 
"sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" }, "is-bigint": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz", - "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==" + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "requires": { + "has-bigints": "^1.0.1" + } }, "is-binary-path": { "version": "1.0.1", @@ -17959,11 +5957,12 @@ } }, "is-boolean-object": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz", - "integrity": "sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", "requires": { - "call-bind": "^1.0.0" + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" } }, "is-buffer": { @@ -17972,9 +5971,9 @@ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" }, "is-callable": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz", - "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==" + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.4.tgz", + "integrity": "sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w==" }, "is-ci": { "version": "2.0.0", @@ -18005,9 +6004,9 @@ } }, "is-core-module": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz", - "integrity": 
"sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.8.1.tgz", + "integrity": "sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA==", "requires": { "has": "^1.0.3" } @@ -18031,9 +6030,12 @@ } }, "is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "requires": { + "has-tostringtag": "^1.0.0" + } }, "is-descriptor": { "version": "0.1.6", @@ -18082,9 +6084,9 @@ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" }, "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "requires": { "is-extglob": "^2.1.1" } @@ -18106,9 +6108,9 @@ } }, "is-negative-zero": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", - "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==" + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", + "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==" }, "is-npm": { 
"version": "4.0.0", @@ -18134,9 +6136,12 @@ } }, "is-number-object": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz", - "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==" + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.6.tgz", + "integrity": "sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g==", + "requires": { + "has-tostringtag": "^1.0.0" + } }, "is-obj": { "version": "2.0.0", @@ -18165,9 +6170,9 @@ } }, "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==" }, "is-plain-object": { "version": "2.0.4", @@ -18183,12 +6188,12 @@ "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" }, "is-regex": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz", - "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", "requires": { "call-bind": "^1.0.2", - "has-symbols": "^1.0.1" + "has-tostringtag": "^1.0.0" } }, "is-resolvable": { @@ -18196,22 +6201,30 @@ "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz", "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg==" }, + 
"is-shared-array-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz", + "integrity": "sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA==" + }, "is-stream": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" }, "is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "requires": { + "has-tostringtag": "^1.0.0" + } }, "is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", "requires": { - "has-symbols": "^1.0.1" + "has-symbols": "^1.0.2" } }, "is-typedarray": { @@ -18219,6 +6232,14 @@ "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" }, + "is-weakref": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", + "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", + "requires": { + "call-bind": "^1.0.2" + } + }, "is-windows": { "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -18304,9 +6325,9 @@ "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" }, "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, "json-schema-traverse": { "version": "0.4.1", @@ -18358,13 +6379,13 @@ } }, "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", "requires": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", - "json-schema": "0.2.3", + "json-schema": "0.4.0", "verror": "1.10.0" } }, @@ -18516,9 +6537,9 @@ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" }, "loglevel": { - "version": "1.7.1", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz", - "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==" + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.8.0.tgz", + "integrity": "sha512-G6A/nJLRgWOuuwdNuA6koovfEV1YpqqAG4pRUlFaz3jj2QNZ8M4vBqnVA+HBTmU/AMNUtlOsMmSpF6NyOjztbA==" }, "lower-case": { "version": "1.1.4", @@ -18560,9 +6581,9 @@ } }, "markdown-it": { - "version": "12.0.4", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.4.tgz", - "integrity": "sha512-34RwOXZT8kyuOJy25oJNJoulO8L0bTHYWXcdZBYZqFnjIy3NgjeoM3FmPXIOFQ26/lSHYMr8oc62B6adxXcb3Q==", + "version": "12.3.2", + 
"resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.3.2.tgz", + "integrity": "sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==", "requires": { "argparse": "^2.0.1", "entities": "~2.1.0", @@ -18576,10 +6597,15 @@ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, + "entities": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", + "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" + }, "linkify-it": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz", - "integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.3.tgz", + "integrity": "sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==", "requires": { "uc.micro": "^1.0.1" } @@ -18592,9 +6618,9 @@ "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==" }, "markdown-it-attrs": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz", - "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==" + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-4.1.3.tgz", + "integrity": "sha512-d5yg/lzQV2KFI/4LPsZQB3uxQrf0/l2/RnMPCPm4lYLOZUSmFlpPccyojnzaHkfQpAD8wBHfnfUW0aMhpKOS2g==" }, "markdown-it-chain": { "version": "1.3.0", @@ -18750,21 +6776,21 @@ } }, "mime": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz", - "integrity": 
"sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==" + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz", + "integrity": "sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==" }, "mime-db": { - "version": "1.47.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", - "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==" + "version": "1.51.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.51.0.tgz", + "integrity": "sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g==" }, "mime-types": { - "version": "2.1.30", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", - "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", + "version": "2.1.34", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.34.tgz", + "integrity": "sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A==", "requires": { - "mime-db": "1.47.0" + "mime-db": "1.51.0" } }, "mimic-response": { @@ -18862,11 +6888,14 @@ } } }, - "mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" - }, + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + }, "move-concurrently": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", @@ -18878,16 +6907,6 @@ "mkdirp": "^0.5.1", "rimraf": "^2.5.4", 
"run-queue": "^1.0.3" - }, - "dependencies": { - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - } } }, "ms": { @@ -18910,9 +6929,9 @@ "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" }, "nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", + "version": "2.15.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.15.0.tgz", + "integrity": "sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ==", "optional": true }, "nanomatch": { @@ -19018,9 +7037,9 @@ } }, "node-releases": { - "version": "1.1.71", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.71.tgz", - "integrity": "sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg==" + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.1.tgz", + "integrity": "sha512-CqyzN6z7Q6aMeF/ktcMVTzhAHCEpf8SOarwpzpf8pNBY2k5/oM34UHldUwp8VKI7uxct2HxSRdJjBaZeESzcxA==" }, "nopt": { "version": "1.0.10", @@ -19064,9 +7083,9 @@ "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E=" }, "nth-check": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.0.tgz", - "integrity": "sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.1.tgz", + "integrity": "sha512-it1vE95zF6dTT9lBsYbxvqh0Soy4SPowchj0UBGj/V6cTPnXXtQOPUbhZ6CmGzAD/rW22LQK6E96pcdJXk4A4w==", "requires": { "boolbase": "^1.0.0" } @@ -19115,9 +7134,9 @@ } }, "object-inspect": { - "version": "1.9.0", - 
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.9.0.tgz", - "integrity": "sha512-i3Bp9iTqwhaLZBxGkRfo5ZbE07BQRT7MGu8+nNgwW9ItGp1TzCTw2DLEoWwjClxBjOFI/hWljTAmYGCEwmtnOw==" + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.0.tgz", + "integrity": "sha512-Ho2z80bVIvJloH+YzRmpZVQe87+qASmBUKZDWgx9cu+KDrX2ZDH/3tMy+gXbZETVGs2M8YdxObOh7XAtim9Y0g==" }, "object-is": { "version": "1.1.5", @@ -19153,13 +7172,13 @@ } }, "object.getownpropertydescriptors": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz", - "integrity": "sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.3.tgz", + "integrity": "sha512-VdDoCwvJI4QdC6ndjpqFmoL3/+HxffFBbcJzKi5hwLLqqx3mdbedRpfZDdK0SrOSauj8X4GzBvnDZl4vTN7dOw==", "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2" + "es-abstract": "^1.19.1" } }, "object.pick": { @@ -19171,14 +7190,13 @@ } }, "object.values": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz", - "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==", + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz", + "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==", "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.2", - "has": "^1.0.3" + "es-abstract": "^1.19.1" } }, "obuf": { @@ -19221,9 +7239,9 @@ } }, "optimize-css-assets-webpack-plugin": { - "version": "5.0.4", - "resolved": 
"https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz", - "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==", + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.8.tgz", + "integrity": "sha512-mgFS1JdOtEGzD8l+EuISqL57cKO+We9GcoiQEmdCWRqqck+FGNmYJtx9qfAPzEz+lRrlThWMuGDaRkI/yWNx/Q==", "requires": { "cssnano": "^4.1.10", "last-call-webpack-plugin": "^3.0.0" @@ -19395,9 +7413,9 @@ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "path-to-regexp": { "version": "0.1.7", @@ -19436,11 +7454,15 @@ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, + "picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + }, "picomatch": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz", - "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==", - "optional": true + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" }, "pify": { 
"version": "4.0.1", @@ -19486,14 +7508,6 @@ "ms": "^2.1.1" } }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - }, "ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -19507,27 +7521,23 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.35", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", - "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", + "version": "7.0.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", "requires": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" + "picocolors": "^0.2.1", + "source-map": "^0.6.1" }, "dependencies": { + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "requires": { - "has-flag": "^3.0.0" - } } } }, @@ -20009,13 +8019,11 @@ } }, "postcss-selector-parser": { - "version": "6.0.4", - "resolved": 
"https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", - "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", + "version": "6.0.9", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz", + "integrity": "sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ==", "requires": { "cssesc": "^3.0.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1", "util-deprecate": "^1.0.2" } }, @@ -20047,9 +8055,9 @@ } }, "postcss-value-parser": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz", - "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ==" + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" }, "prepend-http": { "version": "2.0.0", @@ -20057,9 +8065,9 @@ "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" }, "prettier": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz", - "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.5.1.tgz", + "integrity": "sha512-vBZcPRUR5MZJwoyi3ZoyQlc1rXeEck8KgeC9AwwOn+exuxLxq5toTRDTSaVrXHxelDMHy9zlicw8u66yxoSUFg==", "optional": true }, "pretty-error": { @@ -20077,12 +8085,9 @@ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" }, "prismjs": { - "version": "1.23.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz", - "integrity": 
"sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==", - "requires": { - "clipboard": "^2.0.0" - } + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.26.0.tgz", + "integrity": "sha512-HUoH9C5Z3jKkl3UunCyiD5jwk0+Hz0fIgQ2nbwU2Oo/ceuTAQAg+pPVnfdt2TJWRVLcxKh9iuoYDUSc8clb5UQ==" }, "process": { "version": "0.11.10", @@ -20108,11 +8113,11 @@ "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=" }, "proxy-addr": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", - "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", "requires": { - "forwarded": "~0.1.2", + "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, @@ -20320,9 +8325,9 @@ "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=" }, "qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" + "version": "6.9.6", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.9.6.tgz", + "integrity": "sha512-TIRk4aqYLNoJUbd+g2lEdz5kLWIuTMRagAXxl78Q0RiVjAOugHmeKNGdd3cwo/ktpf9aL9epCfFqWDEKysUlLQ==" }, "query-string": { "version": "5.1.1", @@ -20372,20 +8377,20 @@ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" }, "raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.2.tgz", + "integrity": 
"sha512-RPMAFUJP19WIet/99ngh6Iv8fzAbqum4Li7AD6DtGaW2RpMB/11xDoalPiJMTbu6I3hkbMVkATvZrqb9EEqeeQ==", "requires": { - "bytes": "3.1.0", - "http-errors": "1.7.2", + "bytes": "3.1.1", + "http-errors": "1.8.1", "iconv-lite": "0.4.24", "unpipe": "1.0.0" }, "dependencies": { "bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.1.tgz", + "integrity": "sha512-dWe4nWO/ruEOY7HkUJ5gFt1DCFV9zPRoJr8pV0/ASQermOZjtq8jMjOprC0Kd10GLN+l7xaUPvxzJFWtxGu8Fg==" } } }, @@ -20438,17 +8443,17 @@ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" }, "regenerate-unicode-properties": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz", - "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-9.0.0.tgz", + "integrity": "sha512-3E12UeNSPfjrgwjkR81m5J7Aw/T55Tu7nUyZVQYCKEOs+2dkxEY+DpPtZzO4YruuiPb7NkYLVcyJC4+zCbk5pA==", "requires": { - "regenerate": "^1.4.0" + "regenerate": "^1.4.2" } }, "regenerator-runtime": { - "version": "0.13.7", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", - "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew==" + "version": "0.13.9", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz", + "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==" }, "regenerator-transform": { "version": "0.14.5", @@ -20487,25 
+8492,25 @@ } }, "regexp.prototype.flags": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz", - "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.1.tgz", + "integrity": "sha512-pMR7hBVUUGI7PMA37m2ofIdQCsomVnas+Jn5UPGAHQ+/LlwKm/aTLJHdasmHRzlfeZwHiAOaRSo2rbBDm3nNUQ==", "requires": { "call-bind": "^1.0.2", "define-properties": "^1.1.3" } }, "regexpu-core": { - "version": "4.7.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz", - "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==", + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.8.0.tgz", + "integrity": "sha512-1F6bYsoYiz6is+oz70NWur2Vlh9KWtswuRuzJOfeYUrfPX2o8n74AnUVaOGDbUqVGO9fNHu48/pjJO4sNVwsOg==", "requires": { - "regenerate": "^1.4.0", - "regenerate-unicode-properties": "^8.2.0", - "regjsgen": "^0.5.1", - "regjsparser": "^0.6.4", - "unicode-match-property-ecmascript": "^1.0.4", - "unicode-match-property-value-ecmascript": "^1.2.0" + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^9.0.0", + "regjsgen": "^0.5.2", + "regjsparser": "^0.7.0", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.0.0" } }, "registry-auth-token": { @@ -20530,9 +8535,9 @@ "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A==" }, "regjsparser": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.9.tgz", - "integrity": "sha512-ZqbNRz1SNjLAiYuwY0zoXW8Ne675IX5q+YHioAGbCw4X96Mjl2+dcX9B2ciaeyYjViDAfvIjFpQjJgLttTEERQ==", + "version": "0.7.0", + "resolved": 
"https://registry.npmjs.org/regjsparser/-/regjsparser-0.7.0.tgz", + "integrity": "sha512-A4pcaORqmNMDVwUjWoTzuhwMGpP+NykpfqAsEgI1FSH/EzC7lrN5TMd+kN8YCovX+jMpu8eaqXgXPCa0g8FQNQ==", "requires": { "jsesc": "~0.5.0" }, @@ -20555,109 +8560,15 @@ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" }, "renderkid": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.5.tgz", - "integrity": "sha512-ccqoLg+HLOHq1vdfYNm4TBeaCDIi1FLt3wGojTDSvdewUv65oTmI3cnT2E4hRjl1gzKZIPK+KZrXzlUYKnR+vQ==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.7.tgz", + "integrity": "sha512-oCcFyxaMrKsKcTY59qnCAtmDVSLfPbrv6A3tVbPdFMMrv5jaK10V6m40cKsoPNhAqN6rmHW9sswW4o3ruSrwUQ==", "requires": { - "css-select": "^2.0.2", - "dom-converter": "^0.2", - "htmlparser2": "^3.10.1", - "lodash": "^4.17.20", - "strip-ansi": "^3.0.0" - }, - "dependencies": { - "css-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz", - "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==", - "requires": { - "boolbase": "^1.0.0", - "css-what": "^3.2.1", - "domutils": "^1.7.0", - "nth-check": "^1.0.2" - } - }, - "css-what": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", - "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==" - }, - "dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "requires": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - }, - "dependencies": { - "domelementtype": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz", - "integrity": 
"sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==" - } - } - }, - "domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==" - }, - "domhandler": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "requires": { - "domelementtype": "1" - } - }, - "domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "requires": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "htmlparser2": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz", - "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==", - "requires": { - "domelementtype": "^1.3.1", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^3.1.1" - }, - "dependencies": { - "entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==" - } - } - }, - "nth-check": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz", - "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==", - "requires": { - "boolbase": "~1.0.0" - } - }, - "readable-stream": { - "version": "3.6.0", - "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^3.0.1" } }, "repeat-element": { @@ -20698,9 +8609,9 @@ }, "dependencies": { "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==" } } }, @@ -20720,12 +8631,13 @@ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" }, "resolve": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", - "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.0.tgz", + "integrity": "sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw==", "requires": { - "is-core-module": "^2.2.0", - "path-parse": "^1.0.6" + "is-core-module": "^2.8.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" } }, "resolve-cwd": { @@ -20841,21 +8753,15 @@ "kind-of": "^6.0.0" } }, - "select": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz", - "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=", - "optional": true - }, "select-hose": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", "integrity": 
"sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=" }, "selfsigned": { - "version": "1.10.8", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz", - "integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==", + "version": "1.10.14", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.14.tgz", + "integrity": "sha512-lkjaiAye+wBZDCBsu5BGi0XiLRxeUlsGod5ZP924CRSEoGuZAw/f7y9RKu28rwTfiHVhdavhB0qH0INV6P1lEA==", "requires": { "node-forge": "^0.10.0" } @@ -20874,9 +8780,9 @@ } }, "send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", + "version": "0.17.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.17.2.tgz", + "integrity": "sha512-UJYB6wFSJE3G00nEivR5rgWp8c2xXvJ3OPWPhmuteU0IKj8nKbG3DrjiOmLwpnHGYWAVwA69zmTm++YG0Hmwww==", "requires": { "debug": "2.6.9", "depd": "~1.1.2", @@ -20885,9 +8791,9 @@ "escape-html": "~1.0.3", "etag": "~1.8.1", "fresh": "0.5.2", - "http-errors": "~1.7.2", + "http-errors": "1.8.1", "mime": "1.6.0", - "ms": "2.1.1", + "ms": "2.1.3", "on-finished": "~2.3.0", "range-parser": "~1.2.1", "statuses": "~1.5.0" @@ -20914,9 +8820,9 @@ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" }, "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" } } }, @@ -20974,14 +8880,14 @@ } }, "serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": 
"sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", + "version": "1.14.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.2.tgz", + "integrity": "sha512-+TMNA9AFxUEGuC0z2mevogSnn9MXKb4fa7ngeRMJaaGv8vTwnIEkKi+QGvPt33HSnf8pRS+WGM0EbMtCJLKMBQ==", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.17.1" + "send": "0.17.2" } }, "set-blocking": { @@ -21006,9 +8912,9 @@ "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=" }, "setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" }, "sha.js": { "version": "2.4.11", @@ -21032,10 +8938,20 @@ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" }, + "side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "requires": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + } + }, "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.6.tgz", + "integrity": "sha512-sDl4qMFpijcGw22U5w63KmD3cZJfBuFlVNbVMKje2keoKML7X2UzWbc4XrmEbDwg0NXJc3yv4/ox7b+JWb57kQ==" }, 
"simple-swizzle": { "version": "0.2.2", @@ -21108,6 +9024,18 @@ "version": "0.5.7", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=" + }, + "source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "requires": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } } } }, @@ -21176,26 +9104,33 @@ } }, "sockjs": { - "version": "0.3.21", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.21.tgz", - "integrity": "sha512-DhbPFGpxjc6Z3I+uX07Id5ZO2XwYsWOrYjaSeieES78cq+JaJvVe5q/m1uvjIQhXinhIeCFRH6JgXe+mvVMyXw==", + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", "requires": { "faye-websocket": "^0.11.3", - "uuid": "^3.4.0", + "uuid": "^8.3.2", "websocket-driver": "^0.7.4" + }, + "dependencies": { + "uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" + } } }, "sockjs-client": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.1.tgz", - "integrity": "sha512-VnVAb663fosipI/m6pqRXakEOw7nvd7TUgdr3PlR/8V2I95QIdwT8L4nMxhyU8SmDBHYXU1TOElaKOmKLfYzeQ==", + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.2.tgz", + "integrity": "sha512-ZzRxPBISQE7RpzlH4tKJMQbHM9pabHluk0WBaxAQ+wm/UieeBVBou0p4wVnSQGN9QmpAZygQ0cDIypWuqOFmFQ==", "requires": { "debug": "^3.2.6", "eventsource": "^1.0.7", "faye-websocket": "^0.11.3", 
"inherits": "^2.0.4", "json3": "^3.3.3", - "url-parse": "^1.5.1" + "url-parse": "^1.5.3" }, "dependencies": { "debug": { @@ -21219,6 +9154,13 @@ "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", "requires": { "is-plain-obj": "^1.0.0" + }, + "dependencies": { + "is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=" + } } }, "source-list-map": { @@ -21232,21 +9174,18 @@ "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==" }, "source-map-resolve": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", - "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.6.0.tgz", + "integrity": "sha512-KXBr9d/fO/bWo97NXsPIAW1bFSBOuCnjbNTBMO7N59hsv5i9yzRDfcYwwt0l04+VqnKC+EwzvJZIP/qkuMgR/w==", "requires": { "atob": "^2.1.2", - "decode-uri-component": "^0.2.0", - "resolve-url": "^0.2.1", - "source-map-url": "^0.4.0", - "urix": "^0.1.0" + "decode-uri-component": "^0.2.0" } }, "source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", "requires": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -21277,9 +9216,9 @@ }, "dependencies": { "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": 
"sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", "requires": { "ms": "2.1.2" } @@ -21305,9 +9244,9 @@ }, "dependencies": { "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", "requires": { "ms": "2.1.2" } @@ -21362,9 +9301,9 @@ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" }, "sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "version": "1.17.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", + "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", "requires": { "asn1": "~0.2.3", "assert-plus": "^1.0.0", @@ -21430,11 +9369,11 @@ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=" }, "std-env": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.0.tgz", - "integrity": "sha512-4qT5B45+Kjef2Z6pE0BkskzsH0GO7GrND0wGlTM1ioUe3v0dGYx9ZJH0Aro/YyA8fqQ5EyIKDRjZojJYMFTflw==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.1.tgz", + "integrity": "sha512-eOsoKTWnr6C8aWrqJJ2KAReXoa7Vn5Ywyw6uCXgA/xDhxPoaIsBa5aNJmISY04dLwXPBnDHW4diGM7Sn5K4R/g==", "requires": { - "ci-info": "^3.0.0" + "ci-info": "^3.1.1" } }, "stream-browserify": { @@ -21477,14 +9416,6 @@ 
"resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - }, "string-width": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", @@ -21528,6 +9459,14 @@ "define-properties": "^1.1.3" } }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, "strip-ansi": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", @@ -21574,18 +9513,31 @@ } }, "stylus": { - "version": "0.54.8", - "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz", - "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==", + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.56.0.tgz", + "integrity": "sha512-Ev3fOb4bUElwWu4F9P9WjnnaSpc8XB9OFHSFZSKMFL1CE1oM+oFXWEgAqPmmZIyhBihuqIQlFsVTypiiS9RxeA==", "requires": { - "css-parse": "~2.0.0", - "debug": "~3.1.0", + "css": "^3.0.0", + "debug": "^4.3.2", "glob": "^7.1.6", - "mkdirp": "~1.0.4", "safer-buffer": "^2.1.2", "sax": "~1.2.4", - "semver": "^6.3.0", "source-map": "^0.7.3" + }, + "dependencies": { + "debug": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", + "requires": { + "ms": "2.1.2" + } + }, + "ms": { 
+ "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + } } }, "stylus-loader": { @@ -21606,6 +9558,11 @@ "has-flag": "^3.0.0" } }, + "supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==" + }, "svg-tags": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", @@ -21677,13 +9634,10 @@ "domelementtype": "1" } }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } + "entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==" }, "nth-check": { "version": "1.0.2", @@ -21864,12 +9818,6 @@ "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz", "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg==" }, - "tiny-emitter": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz", - "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==", - "optional": true - }, "to-arraybuffer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", @@ -21948,9 +9896,9 @@ } }, "toidentifier": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==" + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==" }, "token-stream": { "version": "1.0.0", @@ -21984,6 +9932,11 @@ "punycode": "^2.1.0" } }, + "tslib": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz", + "integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw==" + }, "tty-browserify": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", @@ -22067,28 +10020,28 @@ } }, "unicode-canonical-property-names-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz", - "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==" }, "unicode-match-property-ecmascript": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz", - "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": 
"sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", "requires": { - "unicode-canonical-property-names-ecmascript": "^1.0.4", - "unicode-property-aliases-ecmascript": "^1.0.4" + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" } }, "unicode-match-property-value-ecmascript": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz", - "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.0.0.tgz", + "integrity": "sha512-7Yhkc0Ye+t4PNYzOGKedDhXbYIBe1XEQYQxOPyhcXNMJ0WCABqqj6ckydd6pWRZTHV4GuCPKdBAUiMc60tsKVw==" }, "unicode-property-aliases-ecmascript": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz", - "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.0.0.tgz", + "integrity": "sha512-5Zfuy9q/DFr4tfO7ZPeVXb1aPoeQSdeFMLpYuFebehDAhbuevLs5yxSZmIFN1tP5F9Wl4IpJrYojg85/zgyZHQ==" }, "union-value": { "version": "1.0.1", @@ -22318,9 +10271,9 @@ } }, "url-parse": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz", - "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==", + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.4.tgz", + "integrity": 
"sha512-ITeAByWWoqutFClc/lRZnFplgXgEZr3WJ6XngMM/N9DMIm4K8zXPCZ1Jdu0rERwO84w1WC5wkle2ubwTA4NTBg==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" @@ -22408,6 +10361,13 @@ "assert-plus": "^1.0.0", "core-util-is": "1.0.2", "extsprintf": "^1.2.0" + }, + "dependencies": { + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + } } }, "vm-browserify": { @@ -22421,9 +10381,9 @@ "integrity": "sha1-YU9/v42AHwu18GYfWy9XhXUOTwk=" }, "vue": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz", - "integrity": "sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg==" + "version": "2.6.14", + "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.14.tgz", + "integrity": "sha512-x2284lgYvjOMj3Za7kqzRcUSxBboHqtgRE2zlos1qWaOye5yUmHn42LB1250NJBLRwEcdrB0JRwyPTEPhfQjiQ==" }, "vue-hot-reload-api": { "version": "2.3.4", @@ -22431,9 +10391,9 @@ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" }, "vue-loader": { - "version": "15.9.6", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.6.tgz", - "integrity": "sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==", + "version": "15.9.8", + "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.8.tgz", + "integrity": "sha512-GwSkxPrihfLR69/dSV3+5CdMQ0D+jXg8Ma1S4nQXKJAznYFX14vHdc/NetQc34Dw+rBbIJyP7JOuVb9Fhprvog==", "requires": { "@vue/component-compiler-utils": "^3.1.0", "hash-sum": "^1.0.2", @@ -22443,14 +10403,14 @@ } }, "vue-router": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.5.1.tgz", - "integrity": "sha512-RRQNLT8Mzr8z7eL4p7BtKvRaTSGdCbTy2+Mm5HTJvLGYSSeG9gDzNasJPP/yOYKLy+/cLG/ftrqq5fvkFwBJEw==" + "version": "3.5.3", + "resolved": 
"https://registry.npmjs.org/vue-router/-/vue-router-3.5.3.tgz", + "integrity": "sha512-FUlILrW3DGitS2h+Xaw8aRNvGTwtuaxrRkNSHWTizOfLUie7wuYwezeZ50iflRn8YPV5kxmU2LQuu3nM/b3Zsg==" }, "vue-server-renderer": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz", - "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==", + "version": "2.6.14", + "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.14.tgz", + "integrity": "sha512-HifYRa/LW7cKywg9gd4ZtvtRuBlstQBao5ZCWlg40fyB4OPoGfEXAzxb0emSLv4pBDOHYx0UjpqvxpiQFEuoLA==", "requires": { "chalk": "^1.1.3", "hash-sum": "^1.0.2", @@ -22509,9 +10469,9 @@ } }, "vue-template-compiler": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz", - "integrity": "sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==", + "version": "2.6.14", + "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.14.tgz", + "integrity": "sha512-ODQS1SyMbjKoO1JBJZojSw6FE4qnh9rIpUZn2EUT86FKizx9uH5z6uXiIrm4/Nb/gwxTi/o17ZDEGWAXHvtC7g==", "requires": { "de-indent": "^1.0.2", "he": "^1.1.0" @@ -22523,12 +10483,13 @@ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" }, "vuepress": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.8.2.tgz", - "integrity": "sha512-BU1lUDwsA3ghf7a9ga4dsf0iTc++Z/l7BR1kUagHWVBHw7HNRgRDfAZBDDQXhllMILVToIxaTifpne9mSi94OA==", + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.9.7.tgz", + "integrity": "sha512-aSXpoJBGhgjaWUsT1Zs/ZO8JdDWWsxZRlVme/E7QYpn+ZB9iunSgPMozJQNFaHzcRq4kPx5A4k9UhzLRcvtdMg==", "requires": { - "@vuepress/core": "1.8.2", - "@vuepress/theme-default": "1.8.2", + "@vuepress/core": "1.9.7", + 
"@vuepress/theme-default": "1.9.7", + "@vuepress/types": "1.9.7", "cac": "^6.5.6", "envinfo": "^7.2.0", "opencollective-postinstall": "^2.0.2", @@ -22617,27 +10578,27 @@ } }, "vuepress-theme-cosmos": { - "version": "1.0.182", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.182.tgz", - "integrity": "sha512-Mc1ZOsSqLGgbB9xEXsx5QkHUBkKXOoDgkjrp5iX+fwmM4TCmR4MWbTlKpEzfzsxZ1DuixtwVkv0MT+eNvD2Lfw==", + "version": "1.0.183", + "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.183.tgz", + "integrity": "sha512-nLSL0YF6ar2yhZkDvp6o313xBSu/Zc3O3OxRsgLMZcKyWanNqyyh0jFrUqMZcjz7vylRRDth6C2/E0YeisFCbw==", "requires": { "@cosmos-ui/vue": "^0.35.0", - "@vuepress/plugin-google-analytics": "1.7.1", + "@vuepress/plugin-google-analytics": "1.8.2", "algoliasearch": "^4.2.0", - "axios": "^0.21.0", + "axios": "^0.24.0", "cheerio": "^1.0.0-rc.3", - "clipboard-copy": "^3.1.0", - "entities": "2.1.0", + "clipboard-copy": "^4.0.1", + "entities": "3.0.1", "esm": "^3.2.25", "gray-matter": "^4.0.2", - "hotkeys-js": "3.8.1", + "hotkeys-js": "3.8.7", "jsonp": "^0.2.1", "markdown-it": "^12.0.0", - "markdown-it-attrs": "^3.0.3", + "markdown-it-attrs": "^4.0.0", "prismjs": "^1.22.0", "pug": "^3.0.1", "pug-plain-loader": "^1.0.0", - "stylus": "^0.54.8", + "stylus": "^0.56.0", "stylus-loader": "^3.0.2", "tiny-cookie": "^2.3.2", "v-runtime-template": "^1.10.0", @@ -22647,9 +10608,9 @@ } }, "watchpack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.2.0.tgz", - "integrity": "sha512-up4YAn/XHgZHIxFBVCdlMiWDj6WaLKpwVeGQk2I5thdYxF/KmF0aaz6TfJZ/hfl1h/XlcDr7k1KH7ThDagpFaA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.3.1.tgz", + "integrity": "sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA==", "dev": true, "requires": { "glob-to-regexp": "^0.4.1", @@ -22721,16 +10682,6 @@ "resolved": 
"https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==" }, - "anymatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", - "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", - "optional": true, - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - } - }, "binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", @@ -22747,19 +10698,19 @@ } }, "chokidar": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz", - "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==", + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", "optional": true, "requires": { - "anymatch": "~3.1.1", + "anymatch": "~3.1.2", "braces": "~3.0.2", - "fsevents": "~2.3.1", - "glob-parent": "~5.1.0", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", - "readdirp": "~3.5.0" + "readdirp": "~3.6.0" } }, "fill-range": { @@ -22801,18 +10752,10 @@ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "optional": true }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - }, "readdirp": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", - "integrity": 
"sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "optional": true, "requires": { "picomatch": "^2.2.1" @@ -22860,9 +10803,9 @@ }, "dependencies": { "javascript-stringify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.0.1.tgz", - "integrity": "sha512-yV+gqbd5vaOYjqlbk16EG89xB5udgjqQF3C5FAORDg4f/IS1Yc5ERCv5e/57yBcfJYw05V5JyIXabhwb75Xxow==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.1.0.tgz", + "integrity": "sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg==" } } }, @@ -22876,24 +10819,14 @@ "mkdirp": "^0.5.1", "range-parser": "^1.2.1", "webpack-log": "^2.0.0" - }, - "dependencies": { - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - } } }, "webpack-dev-server": { - "version": "3.11.2", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.2.tgz", - "integrity": "sha512-A80BkuHRQfCiNtGBS1EMf2ChTUs0x+B3wGDFmOeT4rmJOHhHTCH2naNxIHhmkr0/UillP4U3yeIyv1pNp+QDLQ==", + "version": "3.11.3", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.3.tgz", + "integrity": "sha512-3x31rjbEQWKMNzacUZRE6wXvUFuGpH7vr0lIEbYpMAG9BOxi0928QU1BBswOAP3kg3H1O4hiS+sq4YyAn6ANnA==", "requires": { - "ansi-html": "0.0.7", + "ansi-html-community": "0.0.8", "bonjour": "^3.5.0", "chokidar": "^2.1.8", "compression": "^1.7.4", @@ -22929,13 +10862,24 @@ }, "dependencies": { "debug": { - "version": 
"4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", + "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", "requires": { "ms": "2.1.2" } }, + "http-proxy-middleware": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz", + "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==", + "requires": { + "http-proxy": "^1.17.0", + "is-glob": "^4.0.0", + "lodash": "^4.17.11", + "micromatch": "^3.1.10" + } + }, "is-absolute-url": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", @@ -23078,9 +11022,9 @@ }, "dependencies": { "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" }, "emoji-regex": { "version": "8.0.0", @@ -23093,21 +11037,21 @@ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" }, "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "requires": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" + "strip-ansi": "^6.0.1" } }, "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "requires": { - "ansi-regex": "^5.0.0" + "ansi-regex": "^5.0.1" } } } @@ -23173,9 +11117,9 @@ } }, "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", + "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", "requires": { "async-limiter": "~1.0.0" } diff --git a/docs/package.json b/docs/package.json index 89798cf289..be723be823 100644 --- a/docs/package.json +++ b/docs/package.json @@ -4,10 +4,10 @@ "description": "Tendermint Core Documentation", "main": "index.js", "dependencies": { - "vuepress-theme-cosmos": "^1.0.182" + "vuepress-theme-cosmos": "^1.0.183" }, "devDependencies": { - "watchpack": "^2.2.0" + "watchpack": "^2.3.1" }, "scripts": { "preserve": "./pre.sh", diff --git a/docs/rfc/README.md b/docs/rfc/README.md index c3adfa08a2..1517db2f5e 100644 --- a/docs/rfc/README.md +++ b/docs/rfc/README.md @@ -43,5 +43,10 @@ sections. 
- [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md) - [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md) - [RFC-005: Event System](./rfc-005-event-system.rst) +- [RFC-006: Event Subscription](./rfc-006-event-subscription.md) +- [RFC-007: Deterministic Proto Byte Serialization](./rfc-007-deterministic-proto-bytes.md) +- [RFC-008: Don't Panic](./rfc-008-don't-panic.md) +- [RFC-009: Consensus Parameter Upgrades](./rfc-009-consensus-parameter-upgrades.md) +- [RFC-010: P2P Light Client](./rfc-010-p2p-light-client.rst) diff --git a/docs/rfc/rfc-006-event-subscription.md b/docs/rfc/rfc-006-event-subscription.md new file mode 100644 index 0000000000..4372f8d287 --- /dev/null +++ b/docs/rfc/rfc-006-event-subscription.md @@ -0,0 +1,204 @@ +# RFC 006: Event Subscription + +## Changelog + +- 30-Oct-2021: Initial draft (@creachadair) + +## Abstract + +The Tendermint consensus node allows clients to subscribe to its event stream +via methods on its RPC service. The ability to view the event stream is +valuable for clients, but the current implementation has some deficiencies that +make it difficult for some clients to use effectively. This RFC documents these +issues and discusses possible approaches to solving them. + + +## Background + +A running Tendermint consensus node exports a [JSON-RPC service][rpc-service] +that provides a [large set of methods][rpc-methods] for inspecting and +interacting with the node. One important cluster of these methods are the +`subscribe`, `unsubscribe`, and `unsubscribe_all` methods, which permit clients +to subscribe to a filtered stream of the [events generated by the node][events] +as it runs. + +Unlike the other methods of the service, the methods in the "event +subscription" cluster are not accessible via [ordinary HTTP GET or POST +requests][rpc-transport], but require upgrading the HTTP connection to a +[websocket][ws]. 
This is necessary because the `subscribe` request needs a +persistent channel to deliver results back to the client, and an ordinary HTTP +connection does not reliably persist across multiple requests. Since these +methods do not work properly without a persistent channel, they are _only_ +exported via a websocket connection, and are not routed for plain HTTP. + + +## Discussion + +There are some operational problems with the current implementation of event +subscription in the RPC service: + +- **Event delivery is not valid JSON-RPC.** When a client issues a `subscribe` + request, the server replies (correctly) with an initial empty acknowledgement + (`{}`). After that, each matching event is delivered "unsolicited" (without + another request from the client), as a separate [response object][json-response] + with the same ID as the initial request. + + This matters because it means a standard JSON-RPC client library can't + interact correctly with the event subscription mechanism. + + Even for clients that can handle unsolicited values pushed by the server, + these responses are invalid: They have an ID, so they cannot be treated as + [notifications][json-notify]; but the ID corresponds to a request that was + already completed. In practice, this means that general-purpose JSON-RPC + libraries cannot use this method correctly -- it requires a custom client. + + The Go RPC client from the Tendermint core can support this case, but clients + in other languages have no easy solution. + + This is the cause of issue [#2949][issue2949]. + +- **Subscriptions are terminated by disconnection.** When the connection to the + client is interrupted, the subscription is silently dropped. + + This is a reasonable behavior, but it matters because a client whose + subscription is dropped gets no useful error feedback, just a closed + connection. Should they try again? Is the node overloaded? Was the client + too slow? Did the caller forget to respond to pings? 
Debugging these kinds + of failures is unnecessarily painful. + + Websockets compound this, because websocket connections time out if no + traffic is seen for a while, and keeping them alive requires active + cooperation between the client and server. With a plain TCP socket, liveness + is handled transparently by the keepalive mechanism. On a websocket, + however, one side has to occasionally send a PING (if the connection is + otherwise idle). The other side must return a matching PONG in time, or the + connection is dropped. Apart from being tedious, this is highly susceptible + to CPU load. + + The Tendermint Go implementation automatically sends and responds to pings. + Clients in other languages (or not wanting to use the Tendermint libraries) + need to handle it explicitly. This burdens the client for no practical + benefit: A subscriber has no information about when matching events may be + available, so it shouldn't have to participate in keeping the connection + alive. + +- **Mismatched load profiles.** Most of the RPC service is mainly important for + low-volume local use, either by the application the node serves (e.g., the + ABCI methods) or by the node operator (e.g., the info methods). Event + subscription is important for remote clients, and may represent a much higher + volume of traffic. + + This matters because both are using the same JSON-RPC mechanism. For + low-volume local use, the ergonomics of JSON-RPC are a good fit: It's easy to + issue queries from the command line (e.g., using `curl`) or to write scripts + that call the RPC methods to monitor the running node. + + For high-volume remote use, JSON-RPC is not such a good fit: Even leaving + aside the non-standard delivery protocol mentioned above, the time and memory + cost of encoding event data matters for the stability of the node when there + can be potentially hundreds of subscribers. 
Moreover, a subscription is + long-lived compared to most RPC methods, in that it may persist as long as the + node is active. + +- **Mismatched security profiles.** The RPC service exports several methods + that should not be open to arbitrary remote callers, both for correctness + reasons (e.g., `remove_tx` and `broadcast_tx_*`) and for operational + stability reasons (e.g., `tx_search`). A node may still need to expose + events, however, to support UI tools. + + This matters, because all the methods share the same network endpoint. While + it is possible to block the top-level GET and POST handlers with a proxy, + exposing the `/websocket` handler exposes not _only_ the event subscription + methods, but the rest of the service as well. + +### Possible Improvements + +There are several things we could do to improve the experience of developers +who need to subscribe to events from the consensus node. These are not all +mutually exclusive. + +1. **Split event subscription into a separate service**. Instead of exposing + event subscription on the same endpoint as the rest of the RPC service, + dedicate a separate endpoint on the node for _only_ event subscription. The + rest of the RPC services (_sans_ events) would remain as-is. + + This would make it easy to disable or firewall outside access to sensitive + RPC methods, without blocking access to event subscription (and vice versa). + This is probably worth doing, even if we don't take any of the other steps + described here. + +2. **Use a different protocol for event subscription.** There are various ways + we could approach this, depending how much we're willing to shake up the + current API. Here are sketches of a few options: + + - Keep the websocket, but rework the API to be more JSON-RPC compliant, + perhaps by converting event delivery into notifications. 
This is less + up-front change for existing clients, but retains all of the existing + implementation complexity, and doesn't contribute much toward more serious + performance and UX improvements later. + + - Switch from websocket to plain HTTP, and rework the subscription API to + use a more conventional request/response pattern instead of streaming. + This is a little more up-front work for existing clients, but leverages + better library support for clients not written in Go. + + The protocol would become more chatty, but we could mitigate that with + batching, and in return we would get more control over what to do about + slow clients: Instead of simply silently dropping them, as we do now, we + could drop messages and signal the client that they missed some data ("M + dropped messages since your last poll"). + + This option is probably the best balance between work, API change, and + benefit, and has a nice incidental effect that it would be easier to debug + subscriptions from the command-line, like the other RPC methods. + + - Switch to gRPC: Preserves a persistent connection and gives us a more + efficient binary wire format (protobuf), at the cost of much more work for + clients and harder debugging. This may be the best option if performance + and server load are our top concerns. + + Given that we are currently using JSON-RPC, however, I'm not convinced the + costs of encoding and sending messages on the event subscription channel + are the limiting factor on subscription efficiency, however. + +3. **Delegate event subscriptions to a proxy.** Give responsibility for + managing event subscription to a proxy that runs separately from the node, + and switch the node to push events to the proxy (like a webhook) instead of + serving subscribers directly. This is more work for the operator (another + process to configure and run) but may scale better for big networks. 
+ + I mention this option for completeness, but making this change would be a + fairly substantial project. If we want to consider shifting responsibility + for event subscription outside the node anyway, we should probably be more + systematic about it. For a more principled approach, see point (4) below. + +4. **Move event subscription downstream of indexing.** We are already planning + to give applications more control over event indexing. By extension, we + might allow the application to also control how events are filtered, + queried, and subscribed. Having the application control these concerns, + rather than the node, might make life easier for developers building UI and + tools for that application. + + This is a much larger change, so I don't think it is likely to be practical + in the near-term, but it's worth considering as a broader option. Some of + the existing code for filtering and selection could be made more reusable, + so applications would not need to reinvent everything. 
+ + +## References + +- [Tendermint RPC service][rpc-service] +- [Tendermint RPC routes][rpc-methods] +- [Discussion of the event system][events] +- [Discussion about RPC transport options][rpc-transport] (from RFC 002) +- [RFC 6455: The websocket protocol][ws] +- [JSON-RPC 2.0 Specification](https://www.jsonrpc.org/specification) + +[rpc-service]: https://docs.tendermint.com/master/rpc/ +[rpc-methods]: https://github.com/tendermint/tendermint/blob/master/internal/rpc/core/routes.go#L12 +[events]: ./rfc-005-event-system.rst +[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport +[ws]: https://datatracker.ietf.org/doc/html/rfc6455 +[json-response]: https://www.jsonrpc.org/specification#response_object +[json-notify]: https://www.jsonrpc.org/specification#notification +[issue2949]: https://github.com/tendermint/tendermint/issues/2949 diff --git a/docs/rfc/rfc-007-deterministic-proto-bytes.md b/docs/rfc/rfc-007-deterministic-proto-bytes.md new file mode 100644 index 0000000000..0b55c22283 --- /dev/null +++ b/docs/rfc/rfc-007-deterministic-proto-bytes.md @@ -0,0 +1,140 @@ +# RFC 007 : Deterministic Proto Byte Serialization + +## Changelog + +- 09-Dec-2021: Initial draft (@williambanfield). + +## Abstract + +This document discusses the issue of stable byte-representation of serialized messages +within Tendermint and describes a few possible routes that could be taken to address it. + +## Background + +We use the byte representations of wire-format proto messages to produce +and verify hashes of data within the Tendermint codebase as well as for +producing and verifying cryptographic signatures over these signed bytes. + +The protocol buffer [encoding spec][proto-spec-encoding] does not guarantee that the byte representation +of a protocol buffer message will be the same between two calls to an encoder. 
+While there is a mode to force the encoder to produce the same byte representation +of messages within a single binary, these guarantees are not good enough for our +use case in Tendermint. We require multiple different versions of a binary running +Tendermint to be able to inter-operate. Additionally, we require that multiple different +systems written in _different languages_ be able to participate in different aspects +of the protocols of Tendermint and be able to verify the integrity of the messages +they each produce. + +While this has not yet created a problem that we know of in a running network, we should +make sure to provide stronger guarantees around the serialized representation of the messages +used within the Tendermint consensus algorithm to prevent any issue from occurring. + + +## Discussion + +Proto has the following points of variability that can produce non-deterministic byte representation: + +1. Encoding order of fields within a message. + +Proto allows fields to be encoded in any order and even be repeated. + +2. Encoding order of elements of a repeated field. + +`repeated` fields in a proto message can be serialized in any order. + +3. Presence or absence of default values. + +Types in proto have defined default values similar to Go's zero values. +Writing or omitting a default value are both legal ways of encoding a wire message. + +4. Serialization of 'unknown' fields. + +Unknown fields can be present when a message is created by a binary with a newer +version of the proto that contains fields that the deserializer in a different +binary does not yet know about. Deserializers in binaries that do not know about the field +will maintain the bytes of the unknown field but not place them into the deserialized structure. + +We have a few options to consider when producing this stable representation. 
+ +### Options for deterministic byte representation + +#### Use only compliant serializers and constrain field usage + +According to [Cosmos-SDK ADR-27][cosmos-sdk-adr-27], when message types obey a simple +set of rules, gogoproto produces a consistent byte representation of serialized messages. +This seems promising, although more research is needed to guarantee gogoproto always +produces a consistent set of bytes on serialized messages. This would solve the problem +within Tendermint as written in Go, but would require ensuring that there are similar +serializers written in other languages that produce the same output as gogoproto. + +#### Reorder serialized bytes to ensure determinism. + +The serialized form of a proto message can be transformed into a canonical representation +by applying simple rules to the serialized bytes. Re-ordering the serialized bytes +would allow Tendermint to produce a canonical byte representation without having to +simultaneously maintain a custom proto marshaller. + +This could be implemented as a function in many languages that performed the following +producing bytes to sign or hashing: + +1. Does not add any of the data from unknown fields into the type to hash. + +Tendermint should not run into a case where it needs to verify the integrity of +data with unknown fields for the following reasons: + +The purpose of checking hash equality within Tendermint is to ensure that +its local copy of data matches the data that the network agreed on. There should +therefore not be a case where a process is checking hash equality using data that it did not expect +to receive. What the data represent may be opaque to the process, such as when checking the +transactions in a block, _but the process will still have expected to receive this data_, +despite not understanding what their internal structure is. It's not clear what it would +mean to verify that a block contains data that a process does not know about. 
+ +The same reasoning applies for signature verification within Tendermint. Processes +verify that a digital signature signed over a set of bytes by locally reconstructing the +data structure that the digital signature signed using the process's local data. + +2. Reordered all message fields to be in tag-sorted order. + +Tag-sorting top-level fields will place all fields of the same tag in a adjacent +to eachother within the serialized representation. + +3. Reordered the contents of all `repeated` fields to be in lexicographically sorted order. + +`repeated` fields will appear in a message as having the same tag but will contain different +contents. Therefore, lexicographical sorting will produce a stable ordering of +fields with the same tag. + +4. Deleted all default values from the byte representation. + +Encoders can include default values or omit them. Most encoders appear to omit them +but we may wish to delete them just to be safe. + +5. Recursively performed these operations on any length-delimited subfields. + +Length delimited fields may contain messages, strings, or just bytes. However, +it's not possible to know what data is being represented by such a field. +A 'string' may happen to have the same structure as an embedded message and we cannot +disambiguate. For this reason, we must apply these same rules to all subfields that +may contain messages. Because we cannot know if we have totally mangled the interior 'string' +or not, this data should never be deserialized or used for anything beyond hashing. + +A **prototype** implementation by @creachadair of this can be found in [the wirepb repo][wire-pb]. +This could be implemented in multiple languages more simply than ensuring that there are +canonical proto serializers that match in each language. 
+ +### Future work + +We should add clear documentation to the Tendermint codebase every time we +compare hashes of proto messages or use proto serialized bytes to produces a +digital signatures that we have been careful to ensure that the hashes are performed +properly. + +### References + +[proto-spec-encoding]: https://developers.google.com/protocol-buffers/docs/encoding +[spec-issue]: https://github.com/tendermint/tendermint/issues/5005 +[cosmos-sdk-adr-27]: https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-027-deterministic-protobuf-serialization.md +[cer-proto-3]: https://github.com/regen-network/canonical-proto3 +[wire-pb]: https://github.com/creachadair/wirepb + diff --git a/docs/rfc/rfc-008-don't-panic.md b/docs/rfc/rfc-008-don't-panic.md new file mode 100644 index 0000000000..ec8c08f5e7 --- /dev/null +++ b/docs/rfc/rfc-008-don't-panic.md @@ -0,0 +1,139 @@ +# RFC 008: Don't Panic + +## Changelog + +- 2021-12-17: initial draft (@tychoish) + +## Abstract + +Today, the Tendermint core codebase has panics in a number of cases as +a response to exceptional situations. These panics complicate testing, +and might make tendermint components difficult to use as a library in +some circumstances. This document outlines a project of converting +panics to errors and describes the situations where its safe to +panic. + +## Background + +Panics in Go are a great mechanism for aborting the current execution +for truly exceptional situations (e.g. memory errors, data corruption, +processes initialization); however, because they resemble exceptions +in other languages, it can be easy to over use them in the +implementation of software architectures. This certainly happened in +the history of Tendermint, and as we embark on the project of +stabilizing the package, we find ourselves in the right moment to +reexamine our use of panics, and largely where panics happen in the +code base. 
+ +There are still some situations where panics are acceptable and +desireable, but it's important that Tendermint, as a project, comes to +consensus--perhaps in the text of this document--on the situations +where it is acceptable to panic. + +### References + +- [Defer Panic and Recover](https://go.dev/blog/defer-panic-and-recover) +- [Why Go gets exceptions right](https://dave.cheney.net/tag/panic) +- [Don't panic](https://dave.cheney.net/practical-go/presentations/gophercon-singapore-2019.html#_dont_panic) + +## Discussion + +### Acceptable Panics + +#### Initialization + +It is unambiguously safe (and desireable) to panic in `init()` +functions in response to any kind of error. These errors are caught by +tests, and occur early enough in process initialization that they +won't cause unexpected runtime crashes. + +Other code that is called early in process initialization MAY panic, +in some situations if it's not possible to return an error or cause +the process to abort early, although these situations should be +vanishingly slim. + +#### Data Corruption + +If Tendermint code encounters an inconsistency that could be +attributed to data corruption or a logical impossibility it is safer +to panic and crash the process than continue to attempt to make +progress in these situations. + +Examples including reading data out of the storage engine that +is invalid or corrupt, or encountering an ambiguous situation where +the process should halt. Generally these forms of corruption are +detected after interacting with a trusted but external data source, +and reflect situations where the author thinks its safer to terminate +the process immediately rather than allow execution to continue. + +#### Unrecoverable Consensus Failure + +In general, a panic should be used in the case of unrecoverable +consensus failures. 
If a process detects that the network is + behaving in an incoherent way and it does not have a clearly defined + mechanism for recovering, the process should panic. + +#### Static Validity + +It is acceptable to panic for invariant violations, within a library +or package, in situations that should be statically impossible, +because there is no way to make these kinds of assertions at compile +time. + +For example, type-asserting `interface{}` values returned by +`container/list` and `container/heap` (and similar), is acceptable, +because package authors should have exclusive control of the inputs to +these containers. Packages should not expose the ability to add +arbitrary values to these data structures. + +#### Controlled Panics Within Libraries + +In some algorithms with highly recursive structures or very nested +call patterns, using a panic, in combination with conditional recovery +handlers results in more manageable code. Ultimately this is a limited +application, and implementations that use panics internally should +only recover conditionally, filtering out panics rather than ignoring +or handling all panics. + +#### Request Handling + +Code that handles responses to incoming/external requests +(e.g. `http.Handler`) should avoid panics, but in practice this isn't +totally possible, and it makes sense that request handlers have some +kind of default recovery mechanism that will prevent one request from +terminating a service. + +### Unacceptable Panics + +In **no** other situation is it acceptable for the code to panic: + +- there should be **no** controlled panics that callers are required + to handle across library/package boundaries. +- callers of library functions should not expect panics. +- ensuring that arbitrary go routines can't panic. +- ensuring that there are no arbitrary panics in core production code, + especially code that can run at any time during the lifetime of a + process. 
+- all test code and fixtures should report normal test assertions with + a mechanism like testify's `require` assertion rather than calling + panic directly. + +The goal of this increased "panic rigor" is to ensure that any escaped +panic reflects a fixable bug in Tendermint. + +### Removing Panics + +The process for removing panics involves a few steps, and will be part +of an ongoing process of code modernization: + +- converting existing explicit panics to errors in cases where it's + possible to return an error, the errors can and should be handled, and returning + an error would not lead to data corruption or cover up data + corruption. + +- increase rigor around operations that can cause runtime errors, like + type assertions, nil pointer errors, array bounds access issues, and + either avoid these situations or return errors where possible. + +- remove generic panic handlers which could cover and hide known + panics. diff --git a/docs/rfc/rfc-009-consensus-parameter-upgrades.md b/docs/rfc/rfc-009-consensus-parameter-upgrades.md new file mode 100644 index 0000000000..60be878df1 --- /dev/null +++ b/docs/rfc/rfc-009-consensus-parameter-upgrades.md @@ -0,0 +1,128 @@ +# RFC 009 : Consensus Parameter Upgrade Considerations + +## Changelog + +- 06-Jan-2022: Initial draft (@williambanfield). + +## Abstract + +This document discusses the challenges of adding additional consensus parameters +to Tendermint and proposes a few solutions that can enable addition of consensus +parameters in a backwards-compatible way. + +## Background + +This section provides an overview of the issues of adding consensus parameters +to Tendermint. + +### Hash Compatibility + +Tendermint produces a hash of a subset of the consensus parameters. The values +that are hashed currently are the `BlockMaxGas` and the `BlockMaxSize`. These +are currently in the [HashedParams struct][hashed-params]. 
This hash is included +in the block and validators use it to validate that their local view of the consensus +parameters matches what the rest of the network is configured with. + +Any new consensus parameters added to Tendermint should be included in this +hash. This presents a challenge for verification of historical blocks when consensus +parameters are added. If a network produced blocks with a version of Tendermint that +did not yet have the new consensus parameters, the parameter hash it produced will +not reference the new parameters. Any nodes joining the network with the newer +version of Tendermint will have the new consensus parameters. Tendermint will need +to handle this case so that new versions of Tendermint with new consensus parameters +can still validate old blocks correctly without having to do anything overly complex +or hacky. + +### Allowing Developer-Defined Values and the `EndBlock` Problem + +When new consensus parameters are added, application developers may wish to set +values for them so that the developer-defined values may be used as soon as the +software upgrades. We do not currently have a clean mechanism for handling this. + +Consensus parameter updates are communicated from the application to Tendermint +within `EndBlock` of some height `H` and take effect at the next height, `H+1`. +This means that for updates that add a consensus parameter, there is a single +height where the new parameters cannot take effect. The parameters did not exist +in the version of the software that emitted the `EndBlock` response for height `H-1`, +so they cannot take effect at height `H`. The first height that the updated params +can take effect is height `H+1`. As of now, height `H` must run with the defaults. + +## Discussion + +### Hash Compatibility + +This section discusses possible solutions to the problem of maintaining backwards-compatibility +of hashed parameters while adding new parameters. 
+ +#### Never Hash Defaults + +One solution to the problem of backwards-compatibility is to never include parameters +in the hash if they are using the default value. This means that blocks produced +before the parameters existed will have implicitly been created with the defaults. +This works because any software with newer versions of Tendermint must be using the +defaults for new parameters when validating old blocks since the defaults cannot +have been updated until a height at which the parameters existed. + +#### Only Update HashedParams on Hash-Breaking Releases + +An alternate solution to never hashing defaults is to not update the hashed +parameters on non-hash-breaking releases. This means that when new consensus +parameters are added to Tendermint, there may be a release that makes use of the +parameters but does not verify that they are the same across all validators by +referencing them in the hash. This seems reasonably safe given the fact that +only a very small subset of the consensus parameters are currently verified at all. + +#### Version The Consensus Parameter Hash Scheme + +The upcoming work on [soft upgrades](https://github.com/tendermint/spec/pull/222) +proposes applying different hashing rules depending on the active block version. +The consensus parameter hash could be versioned in the same way. When different +block versions are used, a different set of consensus parameters will be included +in the hash. + +### Developer Defined Values + +This section discusses possible solutions to the problem of allowing application +developers to define values for the new parameters during the upgrade that adds +the parameters. + +#### Using `InitChain` for New Values + +One solution to the problem of allowing application developers to define values +for new consensus parameters is to call the `InitChain` ABCI method on application +startup and fetch the value for any new consensus parameters. 
The [response object][init-chain-response] +contains a field for `ConsensusParameter` updates so this may serve as a natural place +to put this logic. + +This poses a few difficulties. Nodes replaying old blocks while running new +software do not ever call `InitChain` after the initial time. They will therefore +not have a way to determine that the parameters changed at some height by using a +call to `InitChain`. The `EndBlock` response is how parameter changes at a height +are currently communicated to Tendermint and conflating these cases seems risky. + +#### Force Defaults For Single Height + +An alternate option is to not use `InitChain` and instead require chains to use the +default values of the new parameters for a single height. + +As documented in the upcoming [ADR-74][adr-74], popular chains often simply use the default +values. Additionally, great care is being taken to ensure that logic governed by upcoming +consensus parameters is not liveness-breaking. This means that, in the worst case, +chains will experience a single slow height while waiting for the new values to +be applied. + +#### Add a new `UpgradeChain` method + +An additional method for allowing chains to update the consensus parameters that +do not yet exist is to add a new `UpgradeChain` method to `ABCI`. The upgrade chain +method would be called when the chain detects that the version of block that it +is about to produce does not match the previous block. This method would be called +after `EndBlock` and would return the set of consensus parameters to use at the +next height. It would therefore give an application the chance to set the new +consensus parameters before running a height with these new parameters. 
+ +### References + +[hashed-params]: https://github.com/tendermint/tendermint/blob/0ae974e63911804d4a2007bd8a9b3ad81d6d2a90/types/params.go#L49 +[init-chain-response]: https://github.com/tendermint/tendermint/blob/0ae974e63911804d4a2007bd8a9b3ad81d6d2a90/abci/types/types.pb.go#L1616 +[adr-74]: https://github.com/tendermint/tendermint/pull/7503 diff --git a/docs/rfc/rfc-010-p2p-light-client.rst b/docs/rfc/rfc-010-p2p-light-client.rst new file mode 100644 index 0000000000..b5f465589f --- /dev/null +++ b/docs/rfc/rfc-010-p2p-light-client.rst @@ -0,0 +1,145 @@ +================================== +RFC 010: Peer to Peer Light Client +================================== + +Changelog +--------- + +- 2022-01-21: Initial draft (@tychoish) + +Abstract +-------- + +The dependency on access to the RPC system makes running or using the light +client more complicated than it should be, because in practice node operators +choose to restrict access to these end points (often correctly.) There is no +deep dependency for the light client on the RPC system, and there is a +persistent notion that "make a p2p light client" is a solution to this +operational limitation. This document explores the implications and +requirements of implementing a p2p-based light client, as well as the +possibilities afforded by this implementation. + +Background +---------- + +High Level Design +~~~~~~~~~~~~~~~~~ + +From a high level, the light client P2P implementation, is relatively straight +forward, but is orthogonal to the P2P-backed statesync implementation that +took place during the 0.35 cycle. The light client only really needs to be +able to request (and receive) a `LightBlock` at a given height. To support +this, a new Reactor would run on every full node and validator which would be +able to service these requests. 
The workload would be entirely +request-response, and the implementation of the reactor would likely be very +straight forward, and the implementation of the provider is similarly +relatively simple. + +The complexity of the project focuses around peer discovery, handling when +peers disconnect from the light clients, and how to change the current P2P +code to appropriately handle specialized nodes. + +I believe it's safe to assume that much of the current functionality of the +current ``light`` mode would *not* need to be maintained: there is no need to +proxy the RPC endpoints over the P2P layer and there may be no need to run a +node/process for the p2p light client (e.g. all use of this will be as a +client.) + +The ability to run light clients using the RPC system will continue to be +maintained. + +LibP2P +~~~~~~ + +While some aspects of the P2P light client implementation are orthogonal to +LibP2P project, it's useful to think about the ways that these efforts may +combine or interact. + +We expect to be able to leverage libp2p tools to provide some kind of service +discovery for tendermint-based networks. This means that it will be possible +for the p2p stack to easily identify specialized nodes, (e.g. light clients) +thus obviating many of the design challenges with providing this feature in +the context of the current stack. + +Similarly, libp2p makes it possible for a project to be able back their non-Go +light clients, without the major task of first implementing Tendermint's p2p +connection handling. We should identify if there exist users (e.g. the go IBC +relayer, it's maintainers, and operators) who would be able to take advantage +of p2p light client, before switching to libp2p. 
To our knowledge there are +limited implementations of this p2p protocol (a simple implementation without +secret connection support exists in Rust but it has not been used in +production), and it seems unlikely that a team would implement this directly +ahead of its impending removal. + +User Cases +~~~~~~~~~~ + +This RFC makes a few assumptions about the use cases and users of light +clients in tendermint. + +The most active and delicate use cases for light clients are in the +implementation of the IBC relayer. Thus, we expect that providing P2P light +clients might increase the reliability of relayers and reduce the cost of +running a relayer, because relayer operators won't have to decide between relying +on public RPC endpoints (unreliable) or running their own full nodes +(expensive.) This also assumes that there are *no* other uses of the RPC in +the relayer, and unless the relayers have the option of dropping all RPC use, +it's unclear if a P2P light client will actually be able to successfully +remove the dependency on the RPC system. + +Given that the primary relayer implementation is Hermes (Rust), it might be +safe to deliver a version of Tendermint that adds a light client reactor in +the full nodes, but that does not provide an implementation of a Go light +client. This either means that the Rust implementation would need support for +the legacy P2P connection protocol or wait for the libp2p implementation. + +Client side light client (e.g. wallets, etc.) users may always want to use (a +subset) of the RPC rather than connect to the P2P network for an ephemeral +use. + +Discussion +---------- + +Implementation Questions +~~~~~~~~~~~~~~~~~~~~~~~~ + +Most of the complication here is how to have a long lived light client node +that *only* runs the light client reactor, as this raises a few questions: + +- would users specify a single P2P node to connect to when creating a light + client or would they also need/want to discover peers? 
+ + - **answer**: most light client use cases won't care much about selecting + peers (and those that do can either disable PEX and specify persistent + peers, *or* use the RPC light client.) + +- how do we prevent full nodes and validators from allowing their peer slots, + which are typically limited, from filling with light clients? If + light-clients aren't limited, how do we prevent light clients from consuming + resources on consensus nodes? + + - **answer**: I think we can institute an internal cap on number of light + client connections to accept and also elide light client nodes from PEX + (pre-libp2p, if we implement this.) I believe that libp2p should provide + us with the kind of service discovery semantics for network connectivity + that would obviate this issue. + +- when a light client disconnects from its peers will it need to reset its + internal state (cache)? does this change if it connects to the same peers? + + - **answer**: no, the internal state only needs to be reset if the light + client detects an invalid block or other divergence, and changing + witnesses--which will be more common with a p2p light client--need not + invalidate the cache. + +These issues are primarily present given that the current peer management later +does not have a particularly good service discovery mechanism nor does it have +a very sophisticated way of identifying nodes of different types or modes. + +Report Evidence +~~~~~~~~~~~~~~~ + +The current light client implementation currently has the ability to report +observed evidence. Either the notional light client reactor needs to be able +to handle these kinds of requests *or* all light client nodes need to also run +the evidence reactor. This could be configured at runtime. 
diff --git a/docs/roadmap/README.md b/docs/roadmap/README.md new file mode 100644 index 0000000000..bd9280c454 --- /dev/null +++ b/docs/roadmap/README.md @@ -0,0 +1,6 @@ +--- +order: false +parent: + title: Roadmap + order: 7 +--- \ No newline at end of file diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md index e6e5a32fe3..19d9d89fbf 100644 --- a/docs/roadmap/roadmap.md +++ b/docs/roadmap/roadmap.md @@ -1,8 +1,5 @@ --- -order: false -parent: - title: Roadmap - order: 7 +order: 1 --- # Tendermint Roadmap diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index f83349db28..d8af4a3d1c 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -13,9 +13,12 @@ This section dives into the internals of Go-Tendermint. - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) -- [Block Sync](./block-sync.md) -- [State Sync](./state-sync.md) -- [Mempool](./mempool.md) +- [Block Sync](./block-sync/README.md) +- [State Sync](./state-sync/README.md) +- [Mempool](./mempool/README.md) - [Light Client](./light-client.md) +- [Consensus](./consensus/README.md) +- [Peer Exchange (PEX)](./pex/README.md) +- [Evidence](./evidence/README.md) -For full specifications refer to the [spec repo](https://github.com/tendermint/spec). \ No newline at end of file +For full specifications refer to the [spec repo](https://github.com/tendermint/spec). 
diff --git a/docs/tendermint-core/block-sync.md b/docs/tendermint-core/block-sync/README.md similarity index 93% rename from docs/tendermint-core/block-sync.md rename to docs/tendermint-core/block-sync/README.md index 43e849fcc0..3ffb0953d2 100644 --- a/docs/tendermint-core/block-sync.md +++ b/docs/tendermint-core/block-sync/README.md @@ -1,7 +1,11 @@ --- -order: 10 +order: 1 +parent: + title: Block Sync + order: 6 --- + # Block Sync *Formerly known as Fast Sync* @@ -61,3 +65,7 @@ another event for exposing the fast-sync `complete` status and the state `height The user can query the events by subscribing `EventQueryBlockSyncStatus` Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details. + +## Implementation + +To read more on the implementation please see the [reactor doc](./reactor.md) and the [implementation doc](./implementation.md) diff --git a/docs/tendermint-core/block-sync/img/bc-reactor-routines.png b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png new file mode 100644 index 0000000000..3f574a79b1 Binary files /dev/null and b/docs/tendermint-core/block-sync/img/bc-reactor-routines.png differ diff --git a/docs/tendermint-core/block-sync/img/bc-reactor.png b/docs/tendermint-core/block-sync/img/bc-reactor.png new file mode 100644 index 0000000000..f7fe0f8193 Binary files /dev/null and b/docs/tendermint-core/block-sync/img/bc-reactor.png differ diff --git a/docs/tendermint-core/block-sync/implementation.md b/docs/tendermint-core/block-sync/implementation.md new file mode 100644 index 0000000000..59274782cd --- /dev/null +++ b/docs/tendermint-core/block-sync/implementation.md @@ -0,0 +1,47 @@ +--- +order: 3 +--- + +# Implementation + +## Blocksync Reactor + +- coordinates the pool for syncing +- coordinates the store for persistence +- coordinates the playing of blocks towards the app using a sm.BlockExecutor +- handles switching between fastsync and consensus +- it is a 
p2p.BaseReactor +- starts the pool.Start() and its poolRoutine() +- registers all the concrete types and interfaces for serialisation + +### poolRoutine + +- listens to these channels: + - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends + a &bcBlockRequestMessage for a specific height + - pool signals timeout of a specific peer by posting to timeoutsCh + - switchToConsensusTicker to periodically try and switch to consensus + - trySyncTicker to periodically check if we have fallen behind and then catch-up sync + - if there aren't any new blocks available on the pool it skips syncing +- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores + them on disk +- implements Receive which is called by the switch/peer + - calls AddBlock on the pool when it receives a new block from a peer + +## Block Pool + +- responsible for downloading blocks from peers +- makeRequestersRoutine() + - removes timeout peers + - starts new requesters by calling makeNextRequester() +- requestRoutine(): + - picks a peer and sends the request, then blocks until: + - pool is stopped by listening to pool.Quit + - requester is stopped by listening to Quit + - request is redone + - we receive a block + - gotBlockCh is strange + +## Go Routines in Blocksync Reactor + +![Go Routines Diagram](img/bc-reactor-routines.png) diff --git a/docs/tendermint-core/block-sync/reactor.md b/docs/tendermint-core/block-sync/reactor.md new file mode 100644 index 0000000000..3e28753403 --- /dev/null +++ b/docs/tendermint-core/block-sync/reactor.md @@ -0,0 +1,278 @@ +--- +order: 2 +--- +# Reactor + +The Blocksync Reactor's high level responsibility is to enable peers who are +far behind the current state of the consensus to quickly catch up by downloading +many blocks in parallel, verifying their commits, and executing them against the +ABCI application. 
+ +Tendermint full nodes run the Blocksync Reactor as a service to provide blocks +to new nodes. New nodes run the Blocksync Reactor in "fast_sync" mode, +where they actively make requests for more blocks until they sync up. +Once caught up, "fast_sync" mode is disabled and the node switches to +using (and turns on) the Consensus Reactor. + +## Architecture and algorithm + +The Blocksync reactor is organised as a set of concurrent tasks: + +- Receive routine of Blocksync Reactor +- Task for creating Requesters +- Set of Requesters tasks and - Controller task. + +![Blocksync Reactor Architecture Diagram](img/bc-reactor.png) + +### Data structures + +These are the core data structures necessarily to provide the Blocksync Reactor logic. + +Requester data structure is used to track assignment of request for `block` at position `height` to a peer with id equals to `peerID`. + +```go +type Requester { + mtx Mutex + block Block + height int64 + peerID p2p.ID + redoChannel chan p2p.ID //redo may send multi-time; peerId is used to identify repeat +} +``` + +Pool is a core data structure that stores last executed block (`height`), assignment of requests to peers (`requesters`), current height for each peer and number of pending requests for each peer (`peers`), maximum peer height, etc. + +```go +type Pool { + mtx Mutex + requesters map[int64]*Requester + height int64 + peers map[p2p.ID]*Peer + maxPeerHeight int64 + numPending int32 + store BlockStore + requestsChannel chan<- BlockRequest + errorsChannel chan<- peerError +} +``` + +Peer data structure stores for each peer current `height` and number of pending requests sent to the peer (`numPending`), etc. + +```go +type Peer struct { + id p2p.ID + height int64 + numPending int32 + timeout *time.Timer + didTimeout bool +} +``` + +BlockRequest is internal data structure used to denote current mapping of request for a block at some `height` to a peer (`PeerID`). 
+ +```go +type BlockRequest { + Height int64 + PeerID p2p.ID +} +``` + +### Receive routine of Blocksync Reactor + +It is executed upon message reception on the BlocksyncChannel inside p2p receive routine. There is a separate p2p receive routine (and therefore receive routine of the Blocksync Reactor) executed for each peer. Note that try to send will not block (returns immediately) if outgoing buffer is full. + +```go +handleMsg(pool, m): + upon receiving bcBlockRequestMessage m from peer p: + block = load block for height m.Height from pool.store + if block != nil then + try to send BlockResponseMessage(block) to p + else + try to send bcNoBlockResponseMessage(m.Height) to p + + upon receiving bcBlockResponseMessage m from peer p: + pool.mtx.Lock() + requester = pool.requesters[m.Height] + if requester == nil then + error("peer sent us a block we didn't expect") + continue + + if requester.block == nil and requester.peerID == p then + requester.block = m + pool.numPending -= 1 // atomic decrement + peer = pool.peers[p] + if peer != nil then + peer.numPending-- + if peer.numPending == 0 then + peer.timeout.Stop() + // NOTE: we don't send Quit signal to the corresponding requester task! + else + trigger peer timeout to expire after peerTimeout + pool.mtx.Unlock() + + + upon receiving bcStatusRequestMessage m from peer p: + try to send bcStatusResponseMessage(pool.store.Height) + + upon receiving bcStatusResponseMessage m from peer p: + pool.mtx.Lock() + peer = pool.peers[p] + if peer != nil then + peer.height = m.height + else + peer = create new Peer data structure with id = p and height = m.Height + pool.peers[p] = peer + + if m.Height > pool.maxPeerHeight then + pool.maxPeerHeight = m.Height + pool.mtx.Unlock() + +onTimeout(p): + send error message to pool error channel + peer = pool.peers[p] + peer.didTimeout = true +``` + +### Requester tasks + +Requester task is responsible for fetching a single block at position `height`. 
+ +```go +fetchBlock(height, pool): + while true do { + peerID = nil + block = nil + peer = pickAvailablePeer(height) + peerID = peer.id + + enqueue BlockRequest(height, peerID) to pool.requestsChannel + redo = false + while !redo do + select { + upon receiving Quit message do + return + upon receiving redo message with id on redoChannel do + if peerID == id { + mtx.Lock() + pool.numPending++ + redo = true + mtx.UnLock() + } + } + } + +pickAvailablePeer(height): + selectedPeer = nil + while selectedPeer = nil do + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then + peer.numPending++ + selectedPeer = peer + break + pool.mtx.Unlock() + + if selectedPeer = nil then + sleep requestIntervalMS + + return selectedPeer +``` + +sleep for requestIntervalMS + +### Task for creating Requesters + +This task is responsible for continuously creating and starting Requester tasks. + +```go +createRequesters(pool): + while true do + if !pool.isRunning then break + if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then + pool.mtx.Lock() + nextHeight = pool.height + size(pool.requesters) + requester = create new requester for height nextHeight + pool.requesters[nextHeight] = requester + pool.numPending += 1 // atomic increment + start requester task + pool.mtx.Unlock() + else + sleep requestIntervalMS + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then + send error on pool error channel + peer.didTimeout = true + if peer.didTimeout then + for each requester in pool.requesters do + if requester.getPeerID() == peer then + enqueue msg on requestor's redoChannel + delete(pool.peers, peerID) + pool.mtx.Unlock() +``` + +### Main blocksync reactor controller task + +```go +main(pool): + create trySyncTicker with interval trySyncIntervalMS + create statusUpdateTicker with interval 
statusUpdateIntervalSeconds + create switchToConsensusTicker with interval switchToConsensusIntervalSeconds + + while true do + select { + upon receiving BlockRequest(Height, Peer) on pool.requestsChannel: + try to send bcBlockRequestMessage(Height) to Peer + + upon receiving error(peer) on errorsChannel: + stop peer for error + + upon receiving message on statusUpdateTickerChannel: + broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine + + upon receiving message on switchToConsensusTickerChannel: + pool.mtx.Lock() + receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds + ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight + haveSomePeers = size of pool.peers > 0 + pool.mtx.Unlock() + if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then + switch to consensus mode + + upon receiving message on trySyncTickerChannel: + for i = 0; i < 10; i++ do + pool.mtx.Lock() + firstBlock = pool.requesters[pool.height].block + secondBlock = pool.requesters[pool.height].block + if firstBlock == nil or secondBlock == nil then continue + pool.mtx.Unlock() + verify firstBlock using LastCommit from secondBlock + if verification failed + pool.mtx.Lock() + peerID = pool.requesters[pool.height].peerID + redoRequestsForPeer(peerId) + delete(pool.peers, peerID) + stop peer peerID for error + pool.mtx.Unlock() + else + delete(pool.requesters, pool.height) + save firstBlock to store + pool.height++ + execute firstBlock + } + +redoRequestsForPeer(pool, peerId): + for each requester in pool.requesters do + if requester.getPeerID() == peerID + enqueue msg on redoChannel for requester +``` + +## Channels + +Defines `maxMsgSize` for the maximum size of incoming messages, +`SendQueueCapacity` and `RecvBufferCapacity` for maximum sending and +receiving buffers respectively. 
These are supposed to prevent amplification +attacks by setting up the upper limit on how much data we can receive & send to +a peer. + +Sending incorrectly encoded data will result in stopping the peer. diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md new file mode 100644 index 0000000000..bd7def551f --- /dev/null +++ b/docs/tendermint-core/consensus/README.md @@ -0,0 +1,42 @@ +--- +order: 1 +parent: + title: Consensus + order: 6 +--- + +# Consensus + +Tendermint Consensus is a distributed protocol executed by validator processes to agree on +the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where +each round is a try to reach agreement on the next block. A round starts by having a dedicated +process (called proposer) suggesting to other processes what should be the next block with +the `ProposalMessage`. +The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote +messages, prevote and precommit votes). Note that a proposal message is just a suggestion what the +next block should be; a validator might vote with a `VoteMessage` for a different block. If in some +round, enough number of processes vote for the same block, then this block is committed and later +added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the +validator. The internals of the protocol and how it ensures safety and liveness properties are +explained in a forthcoming document. + +For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the +block as the block size is big, i.e., they don't embed the block inside `Proposal` and +`VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in +[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section) +that uniquely identifies each block. 
The block itself is +disseminated to validator processes using peer-to-peer gossiping protocol. It starts by having a +proposer first splitting a block into a number of block parts, that are then gossiped between +processes using `BlockPartMessage`. + +Validators in Tendermint communicate by peer-to-peer gossiping protocol. Each validator is connected +only to a subset of processes called peers. By the gossiping protocol, a validator send to its peers +all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can +reach agreement on some block, and also obtain the content of the chosen block (block parts). As +part of the gossiping protocol, processes also send auxiliary messages that inform peers about the +executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and +also messages that inform peers what votes the process has seen (`HasVoteMessage`, +`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping +protocol to determine what messages a process should send to its peers. + +We now describe the content of each message exchanged during Tendermint consensus protocol. diff --git a/docs/tendermint-core/consensus/reactor.md b/docs/tendermint-core/consensus/reactor.md new file mode 100644 index 0000000000..ee43846ece --- /dev/null +++ b/docs/tendermint-core/consensus/reactor.md @@ -0,0 +1,370 @@ +--- +order: 2 +--- + +# Reactor + +Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that +manages the state of the Tendermint consensus internal state machine. +When Consensus Reactor is started, it starts Broadcast Routine which starts ConsensusState service. 
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state +(that is used extensively in gossip routines) and starts the following three routines for the peer p: +Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible +for decoding messages received from a peer and for adequate processing of the message depending on its type and content. +The processing normally consists of updating the known peer state and for some messages +(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`) also forwarding message to ConsensusState module +for further processing. In the following text we specify the core functionality of those separate unit of executions +that are part of the Consensus Reactor. + +## ConsensusState service + +Consensus State handles execution of the Tendermint BFT consensus algorithm. It processes votes and proposals, +and upon reaching agreement, commits blocks to the chain and executes them against the application. +The internal state machine receives input from peers, the internal validator and from a timer. + +Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine. +Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed +by the Receive Routine. + +### Receive Routine of the ConsensusState service + +Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions. +It is the only routine that updates RoundState that contains internal consensus state. +Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. +It receives messages from peers, internal validators and from Timeout Ticker +and invokes the corresponding handlers, potentially updating the RoundState. 
+The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are +discussed in separate document. For understanding of this document +it is sufficient to understand that the Receive Routine manages and updates RoundState data structure that is +then extensively used by the gossip routines to determine what information should be sent to peer processes. + +## Round State + +RoundState defines the internal consensus state. It contains height, round, round step, a current validator set, +a proposal and proposal block for the current round, locked round and block (if some block is being locked), set of +received votes and last commit and last validators set. + +```go +type RoundState struct { + Height int64 + Round int + Step RoundStepType + Validators ValidatorSet + Proposal Proposal + ProposalBlock Block + ProposalBlockParts PartSet + LockedRound int + LockedBlock Block + LockedBlockParts PartSet + Votes HeightVoteSet + LastCommit VoteSet + LastValidators ValidatorSet +} +``` + +Internally, consensus will run as a state machine with the following states: + +- RoundStepNewHeight +- RoundStepNewRound +- RoundStepPropose +- RoundStepProposeWait +- RoundStepPrevote +- RoundStepPrevoteWait +- RoundStepPrecommit +- RoundStepPrecommitWait +- RoundStepCommit + +## Peer Round State + +Peer round state contains the known state of a peer. It is being updated by the Receive routine of +Consensus Reactor and by the gossip routines upon sending a message to the peer. + +```golang +type PeerRoundState struct { + Height int64 // Height peer is at + Round int // Round peer is at, -1 if unknown. + Step RoundStepType // Step peer is at + Proposal bool // True if peer has proposal for this round + ProposalBlockPartsHeader PartSetHeader + ProposalBlockParts BitArray + ProposalPOLRound int // Proposal's POL round. -1 if none. + ProposalPOL BitArray // nil until ProposalPOLMessage received. 
+ Prevotes BitArray // All votes peer has for this round + Precommits BitArray // All precommits peer has for this round + LastCommitRound int // Round of commit for last height. -1 if none. + LastCommit BitArray // All commit precommits of commit for last height. + CatchupCommitRound int // Round that we have commit for. Not necessarily unique. -1 if none. + CatchupCommit BitArray // All commit precommits peer has for this height & CatchupCommitRound +} +``` + +## Receive method of Consensus reactor + +The entry point of the Consensus reactor is a receive method. When a message is +received from a peer p, normally the peer round state is updated +correspondingly, and some messages are passed for further processing, for +example to ConsensusState service. We now specify the processing of messages in +the receive method of Consensus reactor for each message type. In the following +message handler, `rs` and `prs` denote `RoundState` and `PeerRoundState`, +respectively. + +### NewRoundStepMessage handler + +```go +handleMessage(msg): + if msg is from smaller height/round/step then return + // Just remember these values. 
+ prsHeight = prs.Height + prsRound = prs.Round + prsCatchupCommitRound = prs.CatchupCommitRound + prsCatchupCommit = prs.CatchupCommit + + Update prs with values from msg + if prs.Height or prs.Round has been updated then + reset Proposal related fields of the peer state + if prs.Round has been updated and msg.Round == prsCatchupCommitRound then + prs.Precommits = psCatchupCommit + if prs.Height has been updated then + if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then + prs.LastCommitRound = msg.LastCommitRound + prs.LastCommit = prs.Precommits + } else { + prs.LastCommitRound = msg.LastCommitRound + prs.LastCommit = nil + } + Reset prs.CatchupCommitRound and prs.CatchupCommit +``` + +### NewValidBlockMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height then return + + if prs.Round != msg.Round && !msg.IsCommit then return + + prs.ProposalBlockPartsHeader = msg.BlockPartsHeader + prs.ProposalBlockParts = msg.BlockParts +``` + +The number of block parts is limited to 1601 (`types.MaxBlockPartsCount`) to +protect the node against DOS attacks. 
+ +### HasVoteMessage handler + +```go +handleMessage(msg): + if prs.Height == msg.Height then + prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) +``` + +### VoteSetMaj23Message handler + +```go +handleMessage(msg): + if prs.Height == msg.Height then + Record in rs that a peer claim to have ⅔ majority for msg.BlockID + Send VoteSetBitsMessage showing votes node has for that BlockId +``` + +### ProposalMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return + prs.Proposal = true + if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler + prs.ProposalBlockPartsHeader = msg.BlockPartsHeader + prs.ProposalPOLRound = msg.POLRound + prs.ProposalPOL = nil + Send msg through internal peerMsgQueue to ConsensusState service +``` + +### ProposalPOLMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return + prs.ProposalPOL = msg.ProposalPOL +``` + +The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the +node against DOS attacks. + +### BlockPartMessage handler + +```go +handleMessage(msg): + if prs.Height != msg.Height || prs.Round != msg.Round then return + Record in prs that peer has block part msg.Part.Index + Send msg trough internal peerMsgQueue to ConsensusState service +``` + +### VoteMessage handler + +```go +handleMessage(msg): + Record in prs that a peer knows vote with index msg.vote.ValidatorIndex for particular height and round + Send msg trough internal peerMsgQueue to ConsensusState service +``` + +### VoteSetBitsMessage handler + +```go +handleMessage(msg): + Update prs for the bit-array of votes peer claims to have for the msg.BlockID +``` + +The number of votes is limited to 10000 (`types.MaxVotesCount`) to protect the +node against DOS attacks. 
+ +## Gossip Data Routine + +It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and +`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`) +and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below: + +```go +1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then + Part = pick a random proposal block part the peer does not have + Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel + if send returns true, record that the peer knows the corresponding block Part + Continue + +1b) if (0 < prs.Height) and (prs.Height < rs.Height) then + help peer catch up using gossipDataForCatchup function + Continue + +1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then + Sleep PeerGossipSleepDuration + Continue + +// at this point rs.Height == prs.Height and rs.Round == prs.Round +1d) if (rs.Proposal != nil and !prs.Proposal) then + Send ProposalMessage(rs.Proposal) to the peer + if send returns true, record that the peer knows Proposal + if 0 <= rs.Proposal.POLRound then + polRound = rs.Proposal.POLRound + prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray() + Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray) + Continue + +2) Sleep PeerGossipSleepDuration +``` + +### Gossip Data For Catchup + +This function is responsible for helping peer catch up if it is at the smaller height (prs.Height < rs.Height). 
+The function executes the following logic: + +```go + if peer does not have all block parts for prs.ProposalBlockPart then + blockMeta = Load Block Metadata for height prs.Height from blockStore + if (!blockMeta.BlockID.PartsHeader == prs.ProposalBlockPartsHeader) then + Sleep PeerGossipSleepDuration + return + Part = pick a random proposal block part the peer does not have + Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel + if send returns true, record that the peer knows the corresponding block Part + return + else Sleep PeerGossipSleepDuration +``` + +## Gossip Votes Routine + +It is used to send the following message: `VoteMessage` on the VoteChannel. +The gossip votes routine is based on the local RoundState (`rs`) +and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below: + +```go +1a) if rs.Height == prs.Height then + if prs.Step == RoundStepNewHeight then + vote = random vote from rs.LastCommit the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + + if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then + Prevotes = rs.Votes.Prevotes(prs.Round) + vote = random vote from Prevotes the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + + if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then + Precommits = rs.Votes.Precommits(prs.Round) + vote = random vote from Precommits the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + + if prs.ProposalPOLRound != -1 then + PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound) + vote = random vote from PolPrevotes the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + +1b) if prs.Height != 0 and rs.Height == prs.Height+1 then + vote = random vote from rs.LastCommit peer does not have + Send VoteMessage(vote) to the peer + if send returns 
true, continue + +1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then + Commit = get commit from BlockStore for prs.Height + vote = random vote from Commit the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue + +2) Sleep PeerGossipSleepDuration +``` + +## QueryMaj23Routine + +It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given +BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState +(`prs`). The routine repeats forever the logic shown below. + +```go +1a) if rs.Height == prs.Height then + Prevotes = rs.Votes.Prevotes(prs.Round) + if there is a ⅔ majority for some blockId in Prevotes then + m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +1b) if rs.Height == prs.Height then + Precommits = rs.Votes.Precommits(prs.Round) + if there is a ⅔ majority for some blockId in Precommits then + m = VoteSetMaj23Message(prs.Height,prs.Round,Precommit,blockId) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then + Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound) + if there is a ⅔ majority for some blockId in Prevotes then + m = VoteSetMaj23Message(prs.Height,prs.ProposalPOLRound,Prevotes,blockId) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and + prs.Height <= blockStore.Height() then + Commit = LoadCommit(prs.Height) + m = VoteSetMaj23Message(prs.Height,Commit.Round,Precommit,Commit.BlockID) + Send m to peer + Sleep PeerQueryMaj23SleepDuration + +2) Sleep PeerQueryMaj23SleepDuration +``` + +## Broadcast routine + +The Broadcast routine subscribes to an internal event bus to receive new round steps and votes messages, and broadcasts messages to peers upon receiving those +events. 
+It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state event. Note that +broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel. +Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel. + +## Channels + +Defines 4 channels: state, data, vote and vote_set_bits. Each channel +has `SendQueueCapacity` and `RecvBufferCapacity` and +`RecvMessageCapacity` set to `maxMsgSize`. + +Sending incorrectly encoded data will result in stopping the peer. diff --git a/docs/tendermint-core/evidence/README.md b/docs/tendermint-core/evidence/README.md new file mode 100644 index 0000000000..2070c48c03 --- /dev/null +++ b/docs/tendermint-core/evidence/README.md @@ -0,0 +1,13 @@ +--- +order: 1 +parent: + title: Evidence + order: 3 +--- + +Evidence is used to identify validators who have or are acting maliciously. There are multiple types of evidence, to read more on the evidence types please see [Evidence Types](https://docs.tendermint.com/master/spec/core/data_structures.html#evidence). + +The evidence reactor works similarly to the mempool reactor. When evidence is observed, it is sent to all the peers in a repetitive manner. This ensures evidence is sent to as many people as possible to avoid censoring. After evidence is received by peers and committed in a block it is pruned from the evidence module. + +Sending incorrectly encoded data or data exceeding `maxMsgSize` will result +in stopping the peer. diff --git a/docs/tendermint-core/mempool.md b/docs/tendermint-core/mempool.md deleted file mode 100644 index 8dd9687819..0000000000 --- a/docs/tendermint-core/mempool.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -order: 12 ---- - -# Mempool - -## Transaction ordering - -Currently, there's no ordering of transactions other than the order they've -arrived (via RPC or from other nodes). - -So the only way to specify the order is to send them to a single node. 
- -valA: - -- `tx1` -- `tx2` -- `tx3` - -If the transactions are split up across different nodes, there's no way to -ensure they are processed in the expected order. - -valA: - -- `tx1` -- `tx2` - -valB: - -- `tx3` - -If valB is the proposer, the order might be: - -- `tx3` -- `tx1` -- `tx2` - -If valA is the proposer, the order might be: - -- `tx1` -- `tx2` -- `tx3` - -That said, if the transactions contain some internal value, like an -order/nonce/sequence number, the application can reject transactions that are -out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then -accept `tx1`. The sender can then retry sending `tx3`, which should probably be -rejected until the node has seen `tx2`. diff --git a/docs/tendermint-core/mempool/README.md b/docs/tendermint-core/mempool/README.md new file mode 100644 index 0000000000..1821cf8492 --- /dev/null +++ b/docs/tendermint-core/mempool/README.md @@ -0,0 +1,71 @@ +--- +order: 1 +parent: + title: Mempool + order: 2 +--- + +The mempool is an in-memory pool of potentially valid transactions, +both to broadcast to other nodes, as well as to provide to the +consensus reactor when it is selected as the block proposer. + +There are two sides to the mempool state: + +- External: get, check, and broadcast new transactions +- Internal: return valid transaction, update list after block commit + +## External functionality + +External functionality is exposed via network interfaces +to potentially untrusted actors. + +- CheckTx - triggered via RPC or P2P +- Broadcast - gossip messages after a successful check + +## Internal functionality + +Internal functionality is exposed via method calls to other +code compiled into the tendermint binary. + +- ReapMaxBytesMaxGas - get txs to propose in the next block. 
Guarantees that the + size of the txs is less than MaxBytes, and gas is less than MaxGas +- Update - remove txs that were included in the last block +- ABCI.CheckTx - call ABCI app to validate the tx + +What does it provide the consensus reactor? +What guarantees does it need from the ABCI app? +(talk about interleaving processes in concurrency) + +## Optimizations + +The implementation within this library also implements a tx cache. +This is so that signatures don't have to be reverified if the tx has +already been seen before. +However, we only store valid txs in the cache, not invalid ones. +This is because invalid txs could become good later. +Txs that are included in a block aren't removed from the cache, +as they still may be getting received over the p2p network. +These txs are stored in the cache by their hash, to mitigate memory concerns. + +Applications should implement replay protection, read [Replay +Protection](https://github.com/tendermint/tendermint/blob/8cdaa7f515a9d366bbc9f0aff2a263a1a6392ead/docs/app-dev/app-development.md#replay-protection) for more information. + +## Configuration + +The mempool has various configurable parameters. + +Sending incorrectly encoded data or data exceeding `maxMsgSize` will result +in stopping the peer. + +`maxMsgSize` equals `MaxBatchBytes` (10MB) + 4 (proto overhead). +`MaxBatchBytes` is a mempool config parameter -> defined locally. The reactor +sends transactions to the connected peers in batches. The maximum size of one +batch is `MaxBatchBytes`. + +The mempool will not send a tx back to any peer from which it received it. + +The reactor assigns a `uint16` number for each peer and maintains a map from +p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders +(`[]uint16`). The list is updated every time mempool receives a transaction it +has already seen. `uint16` assumes that a node will never have over 65535 active +peers (0 is reserved for unknown source - e.g. RPC). 
diff --git a/docs/tendermint-core/mempool/config.md b/docs/tendermint-core/mempool/config.md new file mode 100644 index 0000000000..4e8a9ec73d --- /dev/null +++ b/docs/tendermint-core/mempool/config.md @@ -0,0 +1,105 @@ +--- +order: 2 +--- + +# Configuration + +Here we describe configuration options around mempool. +For the purposes of this document, they are described +in a toml file, but some of them can also be passed in as +environmental variables. + +Config: + +```toml +[mempool] + +recheck = true +broadcast = true +wal-dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max-txs-bytes=5MB, mempool will only accept 5 transactions). +max-txs-bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache-size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}. +max-tx-bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max-batch-bytes = 0 +``` + + + +## Recheck + +Recheck determines if the mempool rechecks all pending +transactions after a block was committed. Once a block +is committed, the mempool removes all valid transactions +that were successfully included in the block. + +If `recheck` is true, then it will rerun CheckTx on +all remaining transactions with the new block state. + +## Broadcast + +Determines whether this node gossips any valid transactions +that arrive in mempool. 
Default is to gossip anything that +passes checktx. If this is disabled, transactions are not +gossiped, but instead stored locally and added to the next +block when this node is the proposer. + +## WalDir + +This defines the directory where mempool writes the write-ahead +logs. These files can be used to reload unbroadcasted +transactions if the node crashes. + +If the directory passed in is an absolute path, the wal file is +created there. If the directory is a relative path, the path is +appended to the home directory of the tendermint process to +generate an absolute path to the wal directory +(default `$HOME/.tendermint` or set via `TM_HOME` or `--home`) + +## Size + +Size defines the total number of transactions stored in the mempool. Default is `5_000` but can be adjusted to any number you would like. The higher the size the more strain on the node. + +## Max Transactions Bytes + +Max transactions bytes defines the total size of all the transactions in the mempool. Default is 1 GB. + +## Cache size + +Cache size determines the size of the cache holding transactions we have already seen. The cache exists to avoid running `checktx` each time we receive a transaction. + +## Keep Invalid Transactions In Cache + +Keep invalid transactions in cache determines whether a transaction in the cache, which is invalid, should be evicted. An invalid transaction here may mean that the transaction may rely on a different tx that has not been included in a block. + +## Max Transaction Bytes + +Max transaction bytes defines the max size a transaction can be for your node. If you would like your node to only keep track of smaller transactions this field would need to be changed. Default is 1MB. + +## Max Batch Bytes + +Max batch bytes defines the number of bytes the node will send to a peer. Default is 0. 
+ +> Note: Unused due to https://github.com/tendermint/tendermint/issues/5796 diff --git a/docs/tendermint-core/pex/README.md b/docs/tendermint-core/pex/README.md new file mode 100644 index 0000000000..5f5c3ed42b --- /dev/null +++ b/docs/tendermint-core/pex/README.md @@ -0,0 +1,177 @@ +--- +order: 1 +parent: + title: Peer Exchange + order: 5 +--- + +# Peer Strategy and Exchange + +Here we outline the design of the PeerStore +and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected +to good peers and to gossip peers to others. + +## Peer Types + +Certain peers are special in that they are specified by the user as `persistent`, +which means we auto-redial them if the connection fails, or if we fail to dial +them. +Some peers can be marked as `private`, which means +we will not put them in the peer store or gossip them to others. + +All peers except private peers and peers coming from them are tracked using the +peer store. + +The rest of our peers are only distinguished by being either +inbound (they dialed our public address) or outbound (we dialed them). + +## Discovery + +Peer discovery begins with a list of seeds. + +When we don't have enough peers, we + +1. ask existing peers +2. dial seeds if we're not dialing anyone currently + +On startup, we will also immediately dial the given list of `persistent_peers`, +and will attempt to maintain persistent connections with them. If the +connections die, or we fail to dial, we will redial every 5s for a few minutes, +then switch to an exponential backoff schedule, and after about a day of +trying, stop dialing the peer. This is the behavior when `persistent_peers_max_dial_period` is configured to zero. + +But if `persistent_peers_max_dial_period` is set greater than zero, the time between dials to each persistent peer +will not exceed `persistent_peers_max_dial_period` during exponential backoff. 
+Therefore, `dial_period` = min(`persistent_peers_max_dial_period`, `exponential_backoff_dial_period`) +and we keep trying again regardless of `maxAttemptsToDial` + +As long as we have less than `MaxNumOutboundPeers`, we periodically request +additional peers from each of our own and try seeds. + +## Listening + +Peers listen on a configurable ListenAddr that they self-report in their +NodeInfo during handshakes with other peers. Peers accept up to +`MaxNumInboundPeers` incoming peers. + +## Address Book + +Peers are tracked via their ID (their PubKey.Address()). +Peers are added to the peer store from the PEX when they first connect to us or +when we hear about them from other peers. + +The peer store is arranged in sets of buckets, and distinguishes between +vetted (old) and unvetted (new) peers. It keeps different sets of buckets for +vetted and unvetted peers. Buckets provide randomization over peer selection. +Peers are put in buckets according to their IP groups. + +IP group can be a masked IP (e.g. `1.2.0.0` or `2602:100::`) or `local` for +local addresses or `unroutable` for unroutable addresses. The mask which +corresponds to the `/16` subnet is used for IPv4, `/32` subnet - for IPv6. +Each group has a limited number of buckets to prevent DoS attacks coming from +that group (e.g. an attacker buying a `/16` block of IPs and launching a DoS +attack). + +[highwayhash](https://arxiv.org/abs/1612.06257) is used as a hashing function +when calculating a bucket. + +When placing a peer into a new bucket: + +```md +hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets +``` + +When placing a peer into an old bucket: + +```md +hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets +``` + +where `key` - random 24 HEX string, `group` - IP group of the peer (e.g. `1.2.0.0`), +`sourcegroup` - IP group of the sender (peer who sent us this address) (e.g. 
`174.11.0.0`), +`addr` - string representation of the peer's address (e.g. `174.11.10.2:26656`). + +A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and +each instance of the peer can have a different IP:PORT. + +If we're trying to add a new peer but there's no space in its bucket, we'll +remove the worst peer from that bucket to make room. + +## Vetting + +When a peer is first added, it is unvetted. +Marking a peer as vetted is outside the scope of the `p2p` package. +For Tendermint, a Peer becomes vetted once it has contributed sufficiently +at the consensus layer; ie. once it has sent us valid and not-yet-known +votes and/or block parts for `NumBlocksForVetted` blocks. +Other users of the p2p package can determine their own conditions for when a peer is marked vetted. + +If a peer becomes vetted but there are already too many vetted peers, +a randomly selected one of the vetted peers becomes unvetted. + +If a peer becomes unvetted (either a new peer, or one that was previously vetted), +a randomly selected one of the unvetted peers is removed from the peer store. + +More fine-grained tracking of peer behaviour can be done using +a trust metric (see below), but it's best to start with something simple. + +## Select Peers to Dial + +When we need more peers, we pick addresses randomly from the addrbook with some +configurable bias for unvetted peers. The bias should be lower when we have +fewer peers and can increase as we obtain more, ensuring that our first peers +are more trustworthy, but always giving us the chance to discover new good +peers. + +We track the last time we dialed a peer and the number of unsuccessful attempts +we've made. If too many attempts are made, we mark the peer as bad. + +Connection attempts are made with exponential backoff (plus jitter). Because +the selection process happens every `ensurePeersPeriod`, we might not end up +dialing a peer for much longer than the backoff duration. 
+ +If we fail to connect to the peer after 16 tries (with exponential backoff), we +remove it from the peer store completely. But for persistent peers, we indefinitely try to +dial all persistent peers unless `persistent_peers_max_dial_period` is configured to zero + +## Select Peers to Exchange + +When we’re asked for peers, we select them as follows: + +- select at most `maxGetSelection` peers +- try to select at least `minGetSelection` peers - if we have less than that, select them all. +- select a random, unbiased `getSelectionPercent` of the peers + +Send the selected peers. Note we select peers for sending without bias for vetted/unvetted. + +## Preventing Spam + +There are various cases where we decide a peer has misbehaved and we disconnect from them. +When this happens, the peer is removed from the peer store and black listed for +some amount of time. We call this "Disconnect and Mark". +Note that the bad behaviour may be detected outside the PEX reactor itself +(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor +so it can remove and mark the peer. + +In the PEX, if a peer sends us an unsolicited list of peers, +or if the peer sends a request too soon after another one, +we Disconnect and MarkBad. + +## Trust Metric + +The quality of peers can be tracked in more fine-grained detail using a +Proportional-Integral-Derivative (PID) controller that incorporates +current, past, and rate-of-change data to inform peer quality. + +While a PID trust metric has been implemented, it remains for future work +to use it in the PEX. + +See the [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md) +and [trustmetric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md) +architecture docs for more details. 
+ + + + + + diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md deleted file mode 100644 index 52286e6c7b..0000000000 --- a/docs/tendermint-core/state-sync.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -order: 11 ---- - -# State Sync - -With block sync a node is downloading all of the data of an application from genesis and verifying it. -With state sync your node will download data related to the head or near the head of the chain and verify the data. -This leads to drastically shorter times for joining a network. - -Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md) - -## Events - -When a node starts with the statesync flag enabled in the config file, it will emit two events: one upon starting statesync and the other upon completion. - -The user can query the events by subscribing `EventQueryStateSyncStatus` -Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details. \ No newline at end of file diff --git a/docs/tendermint-core/state-sync/README.md b/docs/tendermint-core/state-sync/README.md new file mode 100644 index 0000000000..39e76ce39d --- /dev/null +++ b/docs/tendermint-core/state-sync/README.md @@ -0,0 +1,85 @@ +--- +order: 1 +parent: + title: State Sync + order: 4 +--- + + +State sync allows new nodes to rapidly bootstrap and join the network by discovering, fetching, +and restoring state machine snapshots. For more information, see the [state sync ABCI section](https://docs.tendermint.com/master/spec/abci/abci.html#state-sync)). + +The state sync reactor has two main responsibilities: + +* Serving state machine snapshots taken by the local ABCI application to new nodes joining the + network. + +* Discovering existing snapshots and fetching snapshot chunks for an empty local application + being bootstrapped. 
+ +The state sync process for bootstrapping a new node is described in detail in the section linked +above. While technically part of the reactor (see `statesync/syncer.go` and related components), +this document will only cover the P2P reactor component. + +For details on the ABCI methods and data types, see the [ABCI documentation](https://docs.tendermint.com/master/spec/abci/). + +Information on how to configure state sync is located in the [nodes section](../../nodes/state-sync.md) + +## State Sync P2P Protocol + +When a new node begins state syncing, it will ask all peers it encounters if they have any +available snapshots: + +```go +type snapshotsRequestMessage struct{} +``` + +The receiver will query the local ABCI application via `ListSnapshots`, and send a message +containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots: + +```go +type snapshotsResponseMessage struct { + Height uint64 + Format uint32 + Chunks uint32 + Hash []byte + Metadata []byte +} +``` + +The node running state sync will offer these snapshots to the local ABCI application via +`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot +is accepted, the state syncer will request snapshot chunks from appropriate peers: + +```go +type chunkRequestMessage struct { + Height uint64 + Format uint32 + Index uint32 +} +``` + +The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`, +and respond with it (limited to 16 MB): + +```go +type chunkResponseMessage struct { + Height uint64 + Format uint32 + Index uint32 + Chunk []byte + Missing bool +} +``` + +Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty +chunk is a valid (although unlikely) response. + +The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot +is restored. 
If a chunk response is not returned within some time, it will be re-requested, +possibly from a different peer. + +The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol. + +If no state sync is in progress (i.e. during normal operation), any unsolicited response messages +are discarded. diff --git a/docs/tools/README.md b/docs/tools/README.md index 5d778f4700..b6e08162b5 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -12,7 +12,6 @@ Tendermint has some tools that are associated with it for: - [Debugging](./debugging/pro.md) - [Benchmarking](#benchmarking) - [Testnets](#testnets) -- [Validation of remote signers](./remote-signer-validation.md) ## Benchmarking diff --git a/docs/tools/docker-compose.md b/docs/tools/docker-compose.md index b1592ed09a..914f32bdee 100644 --- a/docs/tools/docker-compose.md +++ b/docs/tools/docker-compose.md @@ -169,7 +169,7 @@ Override the [command](https://github.com/tendermint/tendermint/blob/master/netw ipv4_address: 192.167.10.2 ``` -Similarly do for node1, node2 and node3 then [run testnet](https://github.com/tendermint/tendermint/blob/master/docs/networks/docker-compose.md#run-a-testnet) +Similarly do for node1, node2 and node3 then [run testnet](#run-a-testnet). ## Logging diff --git a/docs/tools/remote-signer-validation.md b/docs/tools/remote-signer-validation.md deleted file mode 100644 index 80a6a64bca..0000000000 --- a/docs/tools/remote-signer-validation.md +++ /dev/null @@ -1,156 +0,0 @@ -# tm-signer-harness - -Located under the `tools/tm-signer-harness` folder in the [Tendermint -repository](https://github.com/tendermint/tendermint). - -The Tendermint remote signer test harness facilitates integration testing -between Tendermint and remote signers such as -[tkkms](https://github.com/iqlusioninc/tmkms). 
Such remote signers allow for signing -of important Tendermint messages using -[HSMs](https://en.wikipedia.org/wiki/Hardware_security_module), providing -additional security. - -When executed, `tm-signer-harness`: - -1. Runs a listener (either TCP or Unix sockets). -2. Waits for a connection from the remote signer. -3. Upon connection from the remote signer, executes a number of automated tests - to ensure compatibility. -4. Upon successful validation, the harness process exits with a 0 exit code. - Upon validation failure, it exits with a particular exit code related to the - error. - -## Prerequisites - -Requires the same prerequisites as for building -[Tendermint](https://github.com/tendermint/tendermint). - -## Building - -From the `tools/tm-signer-harness` directory in your Tendermint source -repository, simply run: - -```bash -make - -# To have global access to this executable -make install -``` - -## Docker Image - -To build a Docker image containing the `tm-signer-harness`, also from the -`tools/tm-signer-harness` directory of your Tendermint source repo, simply run: - -```bash -make docker-image -``` - -## Running against KMS - -As an example of how to use `tm-signer-harness`, the following instructions show -you how to execute its tests against [tkkms](https://github.com/iqlusioninc/tmkms). -For this example, we will make use of the **software signing module in KMS**, as -the hardware signing module requires a physical -[YubiHSM](https://www.yubico.com/products/yubihsm/) device. - -### Step 1: Install KMS on your local machine - -See the [tkkms repo](https://github.com/iqlusioninc/tmkms) for details on how to set -KMS up on your local machine. - -If you have [Rust](https://www.rust-lang.org/) installed on your local machine, -you can simply install KMS by: - -```bash -cargo install tmkms -``` - -### Step 2: Make keys for KMS - -The KMS software signing module needs a key with which to sign messages. 
In our -example, we will simply export a signing key from our local Tendermint instance. - -```bash -# Will generate all necessary Tendermint configuration files, including: -# - ~/.tendermint/config/priv_validator_key.json -# - ~/.tendermint/data/priv_validator_state.json -tendermint init validator - -# Extract the signing key from our local Tendermint instance -tm-signer-harness extract_key \ # Use the "extract_key" command - -tmhome ~/.tendermint \ # Where to find the Tendermint home directory - -output ./signing.key # Where to write the key -``` - -Also, because we want KMS to connect to `tm-signer-harness`, we will need to -provide a secret connection key from KMS' side: - -```bash -tmkms keygen secret_connection.key -``` - -### Step 3: Configure and run KMS - -KMS needs some configuration to tell it to use the softer signing module as well -as the `signing.key` file we just generated. Save the following to a file called -`tmkms.toml`: - -```toml -[[validator]] -addr = "tcp://127.0.0.1:61219" # This is where we will find tm-signer-harness. -chain_id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (found in ~/.tendermint/config/genesis.json). -reconnect = true # true is the default -secret_key = "./secret_connection.key" # Where to find our secret connection key. - -[[providers.softsign]] -id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (same as validator.chain_id above). -path = "./signing.key" # The signing key we extracted earlier. -``` - -Then run KMS with this configuration: - -```bash -tmkms start -c tmkms.toml -``` - -This will start KMS, which will repeatedly try to connect to -`tcp://127.0.0.1:61219` until it is successful. 
- -### Step 4: Run tm-signer-harness - -Now we get to run the signer test harness: - -```bash -tm-signer-harness run \ # The "run" command executes the tests - -addr tcp://127.0.0.1:61219 \ # The address we promised KMS earlier - -tmhome ~/.tendermint # Where to find our Tendermint configuration/data files. -``` - -If the current version of Tendermint and KMS are compatible, `tm-signer-harness` -should now exit with a 0 exit code. If they are somehow not compatible, it -should exit with a meaningful non-zero exit code (see the exit codes below). - -### Step 5: Shut down KMS - -Simply hit Ctrl+Break on your KMS instance (or use the `kill` command in Linux) -to terminate it gracefully. - -## Exit Code Meanings - -The following list shows the various exit codes from `tm-signer-harness` and -their meanings: - -| Exit Code | Description | -| --- | --- | -| 0 | Success! | -| 1 | Invalid command line parameters supplied to `tm-signer-harness` | -| 2 | Maximum number of accept retries reached (the `-accept-retries` parameter) | -| 3 | Failed to load `${TMHOME}/config/genesis.json` | -| 4 | Failed to create listener specified by `-addr` parameter | -| 5 | Failed to start listener | -| 6 | Interrupted by `SIGINT` (e.g. 
when hitting Ctrl+Break or Ctrl+C) | -| 7 | Other unknown error | -| 8 | Test 1 failed: public key mismatch | -| 9 | Test 2 failed: signing of proposals failed | -| 10 | Test 3 failed: signing of votes failed | diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 9461f2ceaa..5c6988da9c 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -367,7 +367,11 @@ func main() { flag.Parse() - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to configure logger: %v", err) + os.Exit(1) + } server := abciserver.NewSocketServer(socketAddr, app) server.SetLogger(logger) @@ -438,7 +442,7 @@ This should create a `go.mod` file. The current tutorial only works with the master branch of Tendermint, so let's make sure we're using the latest version: ```sh -go get github.com/tendermint/tendermint@master +go get github.com/tendermint/tendermint@97a3e44e0724f2017079ce24d36433f03124c09e ``` This will populate the `go.mod` with a release number followed by a hash for Tendermint. diff --git a/docs/tutorials/java.md b/docs/tutorials/java.md deleted file mode 100644 index c36cafcf04..0000000000 --- a/docs/tutorials/java.md +++ /dev/null @@ -1,631 +0,0 @@ - - -# Creating an application in Java - -## Guide Assumptions - -This guide is designed for beginners who want to get started with a Tendermint -Core application from scratch. It does not assume that you have any prior -experience with Tendermint Core. - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine (your application) - written in any programming language - and securely -replicates it on many machines. - -By following along with this guide, you'll create a Tendermint Core project -called kvstore, a (very) simple distributed BFT key-value store. 
The application (which should -implementing the blockchain interface (ABCI)) will be written in Java. - -This guide assumes that you are not new to JVM world. If you are new please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html). - -## Built-in app vs external app - -If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance. -[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way. -Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details. - -If you choose another language, like we did in this guide, you have to write a separate app, -which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC. -This guide will show you how to build external application using RPC server. - -Having a separate application might give you better security guarantees as two -processes would be communicating via established binary protocol. Tendermint -Core will not have access to application's state. - -## 1.1 Installing Java and Gradle - -Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html). - -Verify that you have installed Java successfully: - -```bash -$ java -version -java version "12.0.2" 2019-07-16 -Java(TM) SE Runtime Environment (build 12.0.2+10) -Java HotSpot(TM) 64-Bit Server VM (build 12.0.2+10, mixed mode, sharing) -``` - -You can choose any version of Java higher or equal to 8. -This guide is written using Java SE Development Kit 12. 
- -Make sure you have `$JAVA_HOME` environment variable set: - -```bash -$ echo $JAVA_HOME -/Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home -``` - -For Gradle installation, please refer to [their official guide](https://gradle.org/install/). - -## 1.2 Creating a new Java project - -We'll start by creating a new Gradle project. - -```bash -export KVSTORE_HOME=~/kvstore -mkdir $KVSTORE_HOME -cd $KVSTORE_HOME -``` - -Inside the example directory run: - -```bash -gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit -``` - -This will create a new project for you. The tree of files should look like: - -```bash -$ tree -. -|-- build.gradle -|-- gradle -| `-- wrapper -| |-- gradle-wrapper.jar -| `-- gradle-wrapper.properties -|-- gradlew -|-- gradlew.bat -|-- settings.gradle -`-- src - |-- main - | |-- java - | | `-- io - | | `-- example - | | `-- App.java - | `-- resources - `-- test - |-- java - | `-- io - | `-- example - | `-- AppTest.java - `-- resources -``` - -When run, this should print "Hello world." to the standard output. - -```bash -$ ./gradlew run -> Task :run -Hello world. -``` - -## 1.3 Writing a Tendermint Core application - -Tendermint Core communicates with the application through the Application -BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). -This allows Tendermint Core to run applications written in any programming -language. 
- -### 1.3.1 Compile .proto files - -Add the following piece to the top of the `build.gradle`: - -```groovy -buildscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8' - } -} -``` - -Enable the protobuf plugin in the `plugins` section of the `build.gradle`: - -```groovy -plugins { - id 'com.google.protobuf' version '0.8.8' -} -``` - -Add the following code to `build.gradle`: - -```groovy -protobuf { - protoc { - artifact = "com.google.protobuf:protoc:3.7.1" - } - plugins { - grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1' - } - } - generateProtoTasks { - all()*.plugins { - grpc {} - } - } -} -``` - -Now we should be ready to compile the `*.proto` files. - -Copy the necessary `.proto` files to your project: - -```bash -mkdir -p \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto - -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/version/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto \ - 
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/params.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/params.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/proof.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/proof.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs/bits/types.proto -cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto -``` - -Add these dependencies to `build.gradle`: - -```groovy -dependencies { - implementation 'io.grpc:grpc-protobuf:1.22.1' - implementation 'io.grpc:grpc-netty-shaded:1.22.1' - implementation 'io.grpc:grpc-stub:1.22.1' -} -``` - -To generate all protobuf-type classes run: - -```bash -./gradlew generateProto -``` - -To verify that everything went smoothly, you can inspect the `build/generated/` directory: - -```bash -$ tree build/generated/ -build/generated/ -|-- source -| `-- proto -| `-- main -| |-- grpc -| | `-- types -| | `-- ABCIApplicationGrpc.java -| `-- java -| |-- com -| | `-- google -| | `-- protobuf -| | `-- GoGoProtos.java -| |-- common -| | `-- Types.java -| |-- proof -| | `-- Proof.java -| `-- types -| `-- Types.java -``` - -### 1.3.2 Implementing ABCI - -The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file -contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. 
- -Create `$KVSTORE_HOME/src/main/java/io/example/KVStoreApp.java` file with the following content: - -```java -package io.example; - -import io.grpc.stub.StreamObserver; -import types.ABCIApplicationGrpc; -import types.Types.*; - -class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { - - // methods implementation - -} -``` - -Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding -required business logic. - -### 1.3.3 CheckTx - -When a new transaction is added to the Tendermint Core, it will ask the -application to check it (validate the format, signatures, etc.). - -```java -@Override -public void checkTx(RequestCheckTx req, StreamObserver responseObserver) { - var tx = req.getTx(); - int code = validate(tx); - var resp = ResponseCheckTx.newBuilder() - .setCode(code) - .setGasWanted(1) - .build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} - -private int validate(ByteString tx) { - List parts = split(tx, '='); - if (parts.size() != 2) { - return 1; - } - byte[] key = parts.get(0); - byte[] value = parts.get(1); - - // check if the same key=value already exists - var stored = getPersistedValue(key); - if (stored != null && Arrays.equals(stored, value)) { - return 2; - } - - return 0; -} - -private List split(ByteString tx, char separator) { - var arr = tx.toByteArray(); - int i; - for (i = 0; i < tx.size(); i++) { - if (arr[i] == (byte)separator) { - break; - } - } - if (i == tx.size()) { - return Collections.emptyList(); - } - return List.of( - tx.substring(0, i).toByteArray(), - tx.substring(i + 1).toByteArray() - ); -} -``` - -Don't worry if this does not compile yet. - -If the transaction does not have a form of `{bytes}={bytes}`, we return `1` -code. When the same key=value already exist (same key and value), we return `2` -code. For others, we return a zero code indicating that they are valid. 
- -Note that anything with non-zero code will be considered invalid (`-1`, `100`, -etc.) by Tendermint Core. - -Valid transactions will eventually be committed given they are not too big and -have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). - -For the underlying key-value store we'll use -[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. - -`build.gradle`: - -```groovy -dependencies { - implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' -} -``` - -```java -... -import jetbrains.exodus.ArrayByteIterable; -import jetbrains.exodus.ByteIterable; -import jetbrains.exodus.env.Environment; -import jetbrains.exodus.env.Store; -import jetbrains.exodus.env.StoreConfig; -import jetbrains.exodus.env.Transaction; - -class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase { - private Environment env; - private Transaction txn = null; - private Store store = null; - - KVStoreApp(Environment env) { - this.env = env; - } - - ... - - private byte[] getPersistedValue(byte[] k) { - return env.computeInReadonlyTransaction(txn -> { - var store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); - ByteIterable byteIterable = store.get(txn, new ArrayByteIterable(k)); - if (byteIterable == null) { - return null; - } - return byteIterable.getBytesUnsafe(); - }); - } -} -``` - -### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit - -When Tendermint Core has decided on the block, it's transferred to the -application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and -`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the -responses are expected to come in order. 
- -```java -@Override -public void beginBlock(RequestBeginBlock req, StreamObserver responseObserver) { - txn = env.beginTransaction(); - store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn); - var resp = ResponseBeginBlock.newBuilder().build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} -``` - -Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. - -```java -@Override -public void deliverTx(RequestDeliverTx req, StreamObserver responseObserver) { - var tx = req.getTx(); - int code = validate(tx); - if (code == 0) { - List parts = split(tx, '='); - var key = new ArrayByteIterable(parts.get(0)); - var value = new ArrayByteIterable(parts.get(1)); - store.put(txn, key, value); - } - var resp = ResponseDeliverTx.newBuilder() - .setCode(code) - .build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} -``` - -If the transaction is badly formatted or the same key=value already exist, we -again return the non-zero code. Otherwise, we add it to the store. - -In the current design, a block can include incorrect transactions (those who -passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer -directly). This is done for performance reasons. - -Note we can't commit transactions inside the `DeliverTx` because in such case -`Query`, which may be called in parallel, will return inconsistent data (i.e. -it will report that some value already exist even when the actual block was not -yet committed). - -`Commit` instructs the application to persist the new state. 
- -```java -@Override -public void commit(RequestCommit req, StreamObserver responseObserver) { - txn.commit(); - var resp = ResponseCommit.newBuilder() - .setData(ByteString.copyFrom(new byte[8])) - .build(); - responseObserver.onNext(resp); - responseObserver.onCompleted(); -} -``` - -### 1.3.5 Query - -Now, when the client wants to know whenever a particular key/value exist, it -will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call -the application's `Query` method. - -Applications are free to provide their own APIs. But by using Tendermint Core -as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage -the unified API across different applications. Plus they won't have to call the -otherwise separate Tendermint Core API for additional proofs. - -Note we don't include a proof here. - -```java -@Override -public void query(RequestQuery req, StreamObserver responseObserver) { - var k = req.getData().toByteArray(); - var v = getPersistedValue(k); - var builder = ResponseQuery.newBuilder(); - if (v == null) { - builder.setLog("does not exist"); - } else { - builder.setLog("exists"); - builder.setKey(ByteString.copyFrom(k)); - builder.setValue(ByteString.copyFrom(v)); - } - responseObserver.onNext(builder.build()); - responseObserver.onCompleted(); -} -``` - -The complete specification can be found -[here](https://docs.tendermint.com/master/spec/abci/). 
- -## 1.4 Starting an application and a Tendermint Core instances - -Put the following code into the `$KVSTORE_HOME/src/main/java/io/example/App.java` file: - -```java -package io.example; - -import jetbrains.exodus.env.Environment; -import jetbrains.exodus.env.Environments; - -import java.io.IOException; - -public class App { - public static void main(String[] args) throws IOException, InterruptedException { - try (Environment env = Environments.newInstance("tmp/storage")) { - var app = new KVStoreApp(env); - var server = new GrpcServer(app, 26658); - server.start(); - server.blockUntilShutdown(); - } - } -} -``` - -It is the entry point of the application. -Here we create a special object `Environment`, which knows where to store the application state. -Then we create and start the gRPC server to handle Tendermint Core requests. - -Create the `$KVSTORE_HOME/src/main/java/io/example/GrpcServer.java` file with the following content: - -```java -package io.example; - -import io.grpc.BindableService; -import io.grpc.Server; -import io.grpc.ServerBuilder; - -import java.io.IOException; - -class GrpcServer { - private Server server; - - GrpcServer(BindableService service, int port) { - this.server = ServerBuilder.forPort(port) - .addService(service) - .build(); - } - - void start() throws IOException { - server.start(); - System.out.println("gRPC server started, listening on $port"); - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - System.out.println("shutting down gRPC server since JVM is shutting down"); - GrpcServer.this.stop(); - System.out.println("server shut down"); - })); - } - - private void stop() { - server.shutdown(); - } - - /** - * Await termination on the main thread since the grpc library uses daemon threads. 
- */ - void blockUntilShutdown() throws InterruptedException { - server.awaitTermination(); - } -} -``` - -## 1.5 Getting Up and Running - -To create a default configuration, nodeKey and private validator files, let's -execute `tendermint init`. But before we do that, we will need to install -Tendermint Core. - -```bash -$ rm -rf /tmp/example -$ cd $GOPATH/src/github.com/tendermint/tendermint -$ make install -$ TMHOME="/tmp/example" tendermint init validator - -I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -I[2019-07-16|18:20:36.483] Generated config module=main mode=validator -``` - -Feel free to explore the generated files, which can be found at -`/tmp/example/config` directory. Documentation on the config can be found -[here](https://docs.tendermint.com/master/tendermint-core/configuration.html). - -We are ready to start our application: - -```bash -./gradlew run - -gRPC server started, listening on 26658 -``` - -Then we need to start Tendermint Core and point it to our application. 
Staying -within the application directory execute: - -```bash -$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy-app tcp://127.0.0.1:26658 - -I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 -I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node -I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" -I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0 -I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000 -``` - -Now open another tab in your terminal and try sending a transaction: - -```bash -$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "gasWanted": "1" - }, - "deliver_tx": {}, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" -} -``` - -Response should contain the height where this transaction was committed. - -Now let's check if the given key now exists and its value: - -```bash -$ curl -s 'localhost:26657/abci_query?data="tendermint"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3My" - } - } -} -``` - -`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of `tendermint` and `rocks` accordingly. - -## Outro - -I hope everything went smoothly and your first, but hopefully not the last, -Tendermint Core application is up and running. If not, please [open an issue on -Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/master/). 
- -The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java). diff --git a/docs/tutorials/kotlin.md b/docs/tutorials/kotlin.md deleted file mode 100644 index 6fa1d1894d..0000000000 --- a/docs/tutorials/kotlin.md +++ /dev/null @@ -1,605 +0,0 @@ - - -# Creating an application in Kotlin - -## Guide Assumptions - -This guide is designed for beginners who want to get started with a Tendermint -Core application from scratch. It does not assume that you have any prior -experience with Tendermint Core. - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine (your application) - written in any programming language - and securely -replicates it on many machines. - -By following along with this guide, you'll create a Tendermint Core project -called kvstore, a (very) simple distributed BFT key-value store. The application (which should -implementing the blockchain interface (ABCI)) will be written in Kotlin. - -This guide assumes that you are not new to JVM world. If you are new please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html). - -## Built-in app vs external app - -If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance. -[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way. -Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details. - -If you choose another language, like we did in this guide, you have to write a separate app, -which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC. -This guide will show you how to build external application using RPC server. 
- -Having a separate application might give you better security guarantees as two -processes would be communicating via established binary protocol. Tendermint -Core will not have access to application's state. - -## 1.1 Installing Java and Gradle - -Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html). - -Verify that you have installed Java successfully: - -```bash -java -version -java version "1.8.0_162" -Java(TM) SE Runtime Environment (build 1.8.0_162-b12) -Java HotSpot(TM) 64-Bit Server VM (build 25.162-b12, mixed mode) -``` - -You can choose any version of Java higher or equal to 8. -In my case it is Java SE Development Kit 8. - -Make sure you have `$JAVA_HOME` environment variable set: - -```bash -echo $JAVA_HOME -/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home -``` - -For Gradle installation, please refer to [their official guide](https://gradle.org/install/). - -## 1.2 Creating a new Kotlin project - -We'll start by creating a new Gradle project. - -```bash -export KVSTORE_HOME=~/kvstore -mkdir $KVSTORE_HOME -cd $KVSTORE_HOME -``` - -Inside the example directory run: - -```bash -gradle init --dsl groovy --package io.example --project-name example --type kotlin-application -``` - -This will create a new project for you. The tree of files should look like: - -```bash -tree -. -|-- build.gradle -|-- gradle -| `-- wrapper -| |-- gradle-wrapper.jar -| `-- gradle-wrapper.properties -|-- gradlew -|-- gradlew.bat -|-- settings.gradle -`-- src - |-- main - | |-- kotlin - | | `-- io - | | `-- example - | | `-- App.kt - | `-- resources - `-- test - |-- kotlin - | `-- io - | `-- example - | `-- AppTest.kt - `-- resources -``` - -When run, this should print "Hello world." to the standard output. - -```bash -./gradlew run -> Task :run -Hello world. 
-``` - -## 1.3 Writing a Tendermint Core application - -Tendermint Core communicates with the application through the Application -BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). -This allows Tendermint Core to run applications written in any programming -language. - -### 1.3.1 Compile .proto files - -Add the following piece to the top of the `build.gradle`: - -```groovy -buildscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8' - } -} -``` - -Enable the protobuf plugin in the `plugins` section of the `build.gradle`: - -```groovy -plugins { - id 'com.google.protobuf' version '0.8.8' -} -``` - -Add the following code to `build.gradle`: - -```groovy -protobuf { - protoc { - artifact = "com.google.protobuf:protoc:3.7.1" - } - plugins { - grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1' - } - } - generateProtoTasks { - all()*.plugins { - grpc {} - } - } -} -``` - -Now we should be ready to compile the `*.proto` files. 
- -Copy the necessary `.proto` files to your project: - -```bash -mkdir -p \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto - -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/version/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/params.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/params.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/proof.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/proof.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto -cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/libs/bits/types.proto \ - 
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs/bits/types.proto -cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ - $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto -``` - -Add these dependencies to `build.gradle`: - -```groovy -dependencies { - implementation 'io.grpc:grpc-protobuf:1.22.1' - implementation 'io.grpc:grpc-netty-shaded:1.22.1' - implementation 'io.grpc:grpc-stub:1.22.1' -} -``` - -To generate all protobuf-type classes run: - -```bash -./gradlew generateProto -``` - -To verify that everything went smoothly, you can inspect the `build/generated/` directory: - -```bash -tree build/generated/ -build/generated/ -`-- source - `-- proto - `-- main - |-- grpc - | `-- types - | `-- ABCIApplicationGrpc.java - `-- java - |-- com - | `-- google - | `-- protobuf - | `-- GoGoProtos.java - |-- common - | `-- Types.java - |-- merkle - | `-- Merkle.java - `-- types - `-- Types.java -``` - -### 1.3.2 Implementing ABCI - -The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file -contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement. - -Create `$KVSTORE_HOME/src/main/kotlin/io/example/KVStoreApp.kt` file with the following content: - -```kotlin -package io.example - -import io.grpc.stub.StreamObserver -import types.ABCIApplicationGrpc -import types.Types.* - -class KVStoreApp : ABCIApplicationGrpc.ABCIApplicationImplBase() { - - // methods implementation - -} -``` - -Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding -required business logic. - -### 1.3.3 CheckTx - -When a new transaction is added to the Tendermint Core, it will ask the -application to check it (validate the format, signatures, etc.). 
- -```kotlin -override fun checkTx(req: RequestCheckTx, responseObserver: StreamObserver) { - val code = req.tx.validate() - val resp = ResponseCheckTx.newBuilder() - .setCode(code) - .setGasWanted(1) - .build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} - -private fun ByteString.validate(): Int { - val parts = this.split('=') - if (parts.size != 2) { - return 1 - } - val key = parts[0] - val value = parts[1] - - // check if the same key=value already exists - val stored = getPersistedValue(key) - if (stored != null && stored.contentEquals(value)) { - return 2 - } - - return 0 -} - -private fun ByteString.split(separator: Char): List { - val arr = this.toByteArray() - val i = (0 until this.size()).firstOrNull { arr[it] == separator.toByte() } - ?: return emptyList() - return listOf( - this.substring(0, i).toByteArray(), - this.substring(i + 1).toByteArray() - ) -} -``` - -Don't worry if this does not compile yet. - -If the transaction does not have a form of `{bytes}={bytes}`, we return `1` -code. When the same key=value already exist (same key and value), we return `2` -code. For others, we return a zero code indicating that they are valid. - -Note that anything with non-zero code will be considered invalid (`-1`, `100`, -etc.) by Tendermint Core. - -Valid transactions will eventually be committed given they are not too big and -have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). - -For the underlying key-value store we'll use -[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java. - -`build.gradle`: - -```groovy -dependencies { - implementation 'org.jetbrains.xodus:xodus-environment:1.3.91' -} -``` - -```kotlin -... 
-import jetbrains.exodus.ArrayByteIterable -import jetbrains.exodus.env.Environment -import jetbrains.exodus.env.Store -import jetbrains.exodus.env.StoreConfig -import jetbrains.exodus.env.Transaction - -class KVStoreApp( - private val env: Environment -) : ABCIApplicationGrpc.ABCIApplicationImplBase() { - - private var txn: Transaction? = null - private var store: Store? = null - - ... - - private fun getPersistedValue(k: ByteArray): ByteArray? { - return env.computeInReadonlyTransaction { txn -> - val store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn) - store.get(txn, ArrayByteIterable(k))?.bytesUnsafe - } - } -} -``` - -### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit - -When Tendermint Core has decided on the block, it's transferred to the -application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and -`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the -responses are expected to come in order. - -```kotlin -override fun beginBlock(req: RequestBeginBlock, responseObserver: StreamObserver) { - txn = env.beginTransaction() - store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn!!) - val resp = ResponseBeginBlock.newBuilder().build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} -``` - -Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store. - -```kotlin -override fun deliverTx(req: RequestDeliverTx, responseObserver: StreamObserver) { - val code = req.tx.validate() - if (code == 0) { - val parts = req.tx.split('=') - val key = ArrayByteIterable(parts[0]) - val value = ArrayByteIterable(parts[1]) - store!!.put(txn!!, key, value) - } - val resp = ResponseDeliverTx.newBuilder() - .setCode(code) - .build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} -``` - -If the transaction is badly formatted or the same key=value already exist, we -again return the non-zero code. 
Otherwise, we add it to the store. - -In the current design, a block can include incorrect transactions (those who -passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer -directly). This is done for performance reasons. - -Note we can't commit transactions inside the `DeliverTx` because in such case -`Query`, which may be called in parallel, will return inconsistent data (i.e. -it will report that some value already exist even when the actual block was not -yet committed). - -`Commit` instructs the application to persist the new state. - -```kotlin -override fun commit(req: RequestCommit, responseObserver: StreamObserver) { - txn!!.commit() - val resp = ResponseCommit.newBuilder() - .setData(ByteString.copyFrom(ByteArray(8))) - .build() - responseObserver.onNext(resp) - responseObserver.onCompleted() -} -``` - -### 1.3.5 Query - -Now, when the client wants to know whenever a particular key/value exist, it -will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call -the application's `Query` method. - -Applications are free to provide their own APIs. But by using Tendermint Core -as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage -the unified API across different applications. Plus they won't have to call the -otherwise separate Tendermint Core API for additional proofs. - -Note we don't include a proof here. 
- -```kotlin -override fun query(req: RequestQuery, responseObserver: StreamObserver) { - val k = req.data.toByteArray() - val v = getPersistedValue(k) - val builder = ResponseQuery.newBuilder() - if (v == null) { - builder.log = "does not exist" - } else { - builder.log = "exists" - builder.key = ByteString.copyFrom(k) - builder.value = ByteString.copyFrom(v) - } - responseObserver.onNext(builder.build()) - responseObserver.onCompleted() -} -``` - -The complete specification can be found -[here](https://docs.tendermint.com/master/spec/abci/). - -## 1.4 Starting an application and a Tendermint Core instances - -Put the following code into the `$KVSTORE_HOME/src/main/kotlin/io/example/App.kt` file: - -```kotlin -package io.example - -import jetbrains.exodus.env.Environments - -fun main() { - Environments.newInstance("tmp/storage").use { env -> - val app = KVStoreApp(env) - val server = GrpcServer(app, 26658) - server.start() - server.blockUntilShutdown() - } -} -``` - -It is the entry point of the application. -Here we create a special object `Environment`, which knows where to store the application state. -Then we create and start the gRPC server to handle Tendermint Core requests. 
- -Create `$KVSTORE_HOME/src/main/kotlin/io/example/GrpcServer.kt` file with the following content: - -```kotlin -package io.example - -import io.grpc.BindableService -import io.grpc.ServerBuilder - -class GrpcServer( - private val service: BindableService, - private val port: Int -) { - private val server = ServerBuilder - .forPort(port) - .addService(service) - .build() - - fun start() { - server.start() - println("gRPC server started, listening on $port") - Runtime.getRuntime().addShutdownHook(object : Thread() { - override fun run() { - println("shutting down gRPC server since JVM is shutting down") - this@GrpcServer.stop() - println("server shut down") - } - }) - } - - fun stop() { - server.shutdown() - } - - /** - * Await termination on the main thread since the grpc library uses daemon threads. - */ - fun blockUntilShutdown() { - server.awaitTermination() - } - -} -``` - -## 1.5 Getting Up and Running - -To create a default configuration, nodeKey and private validator files, let's -execute `tendermint init validator`. But before we do that, we will need to install -Tendermint Core. - -```bash -rm -rf /tmp/example -cd $GOPATH/src/github.com/tendermint/tendermint -make install -TMHOME="/tmp/example" tendermint init validator - -I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json -I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json -I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json -I[2019-07-16|18:20:36.482] Generated config module=main mode=validator -``` - -Feel free to explore the generated files, which can be found at -`/tmp/example/config` directory. Documentation on the config can be found -[here](https://docs.tendermint.com/master/tendermint-core/configuration.html). 
- -We are ready to start our application: - -```bash -./gradlew run - -gRPC server started, listening on 26658 -``` - -Then we need to start Tendermint Core and point it to our application. Staying -within the application directory execute: - -```bash -TMHOME="/tmp/example" tendermint node --abci grpc --proxy-app tcp://127.0.0.1:26658 - -I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 -I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node -I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}" -I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0 -I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000 -``` - -Now open another tab in your terminal and try sending a transaction: - -```bash -curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "gasWanted": "1" - }, - "deliver_tx": {}, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" -} -``` - -Response should contain the height where this transaction was committed. - -Now let's check if the given key now exists and its value: - -```bash -curl -s 'localhost:26657/abci_query?data="tendermint"' -{ - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3My" - } - } -} -``` - -`dGVuZGVybWludA==` and `cm9ja3M=` are the base64-encoding of the ASCII of `tendermint` and `rocks` accordingly. - -## Outro - -I hope everything went smoothly and your first, but hopefully not the last, -Tendermint Core application is up and running. 
If not, please [open an issue on -Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig -deeper, read [the docs](https://docs.tendermint.com/master/). - -The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-kotlin). diff --git a/docs/versions b/docs/versions index c88a614bd9..70754facc3 100644 --- a/docs/versions +++ b/docs/versions @@ -1,4 +1,4 @@ master master -v0.32.x v0.32 v0.33.x v0.33 v0.34.x v0.34 +v0.35.x v0.35 diff --git a/go.mod b/go.mod index e910a952ce..885dcdebc8 100644 --- a/go.mod +++ b/go.mod @@ -1,43 +1,217 @@ module github.com/tendermint/tendermint -go 1.16 +go 1.17 require ( - github.com/BurntSushi/toml v0.4.1 - github.com/adlio/schema v1.1.14 + github.com/BurntSushi/toml v1.0.0 + github.com/adlio/schema v1.2.3 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/celestiaorg/nmt v0.8.0 github.com/celestiaorg/rsmt2d v0.3.0 + github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/golangci/golangci-lint v1.42.1 + github.com/golangci/golangci-lint v1.44.0 github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.3 + github.com/lib/pq v1.10.4 github.com/libp2p/go-buffer-pool v0.0.2 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible - github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_golang 
v1.12.1 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 - github.com/rs/cors v1.8.0 - github.com/rs/zerolog v1.25.0 - github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa + github.com/rs/cors v1.8.2 + github.com/rs/zerolog v1.26.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.9.0 + github.com/spf13/cobra v1.3.0 + github.com/spf13/viper v1.10.1 github.com/stretchr/testify v1.7.0 - github.com/tendermint/tm-db v0.6.4 - github.com/vektra/mockery/v2 v2.9.4 - golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 - golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b + github.com/tendermint/tm-db v0.6.6 + github.com/vektra/mockery/v2 v2.10.0 + golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce + golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/grpc v1.41.0 + google.golang.org/grpc v1.44.0 pgregory.net/rapid v0.4.7 ) + +require ( + 4d63.com/gochecknoglobals v0.1.0 // indirect + github.com/Antonboom/errname v0.1.5 // indirect + github.com/Antonboom/nilnil v0.1.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/DataDog/zstd v1.4.1 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Microsoft/go-winio v0.5.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/OpenPeeDeeP/depguard v1.1.0 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/ashanbrown/forbidigo v1.3.0 // indirect + github.com/ashanbrown/makezero v1.1.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/blizzy78/varnamelen v0.5.0 // indirect + github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/breml/bidichk v0.2.1 // indirect + 
github.com/breml/errchkjson v0.2.1 // indirect + github.com/butuzov/ireturn v0.1.1 // indirect + github.com/celestiaorg/go-leopard v0.1.0 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/charithe/durationcheck v0.0.9 // indirect + github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect + github.com/containerd/continuity v0.2.1 // indirect + github.com/daixiang0/gci v0.2.9 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingajkin/go-header v0.4.2 // indirect + github.com/dgraph-io/badger/v2 v2.2007.2 // indirect + github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/fzipp/gocyclo v0.4.0 // indirect + github.com/go-critic/go-critic v0.6.2 // indirect + github.com/go-toolsmith/astcast v1.0.0 // indirect + github.com/go-toolsmith/astcopy v1.0.0 // indirect + github.com/go-toolsmith/astequal v1.0.1 // indirect + github.com/go-toolsmith/astfmt v1.0.0 // indirect + github.com/go-toolsmith/astp v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 // indirect + github.com/go-toolsmith/typep v1.0.2 // indirect + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/golang/snappy v0.0.3 // 
indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect + github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.3.5 // indirect + github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/btree v1.0.0 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/kisielk/errcheck v1.6.0 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kulti/thelper v0.5.0 // indirect + github.com/kunwardeep/paralleltest v1.0.3 // indirect + github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/ldez/gomoddirectives v0.2.2 // indirect + github.com/ldez/tagliatelle v0.3.0 // indirect + github.com/leonklingele/grouper v1.1.0 // indirect + 
github.com/magiconair/properties v1.8.5 // indirect + github.com/maratori/testpackage v1.0.1 // indirect + github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect + github.com/mgechev/revive v1.1.3 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/moricho/tparallel v0.2.1 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.7.11 // indirect + github.com/nishanths/predeclared v0.2.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.0.3 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/quasilyte/go-ruleguard v0.3.15 // indirect + github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/ryancurrah/gomodguard v1.2.3 // indirect + github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect + 
github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect + github.com/securego/gosec/v2 v2.9.6 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sivchari/containedctx v1.0.1 // indirect + github.com/sivchari/tenv v1.4.7 // indirect + github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.8.0 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/sylvia7788/contextcheck v1.0.4 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect + github.com/tdakkota/asciicheck v0.1.1 // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect + github.com/tomarrell/wrapcheck/v2 v2.4.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.4 // indirect + github.com/uudashr/gocognit v1.0.5 // indirect + github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect + gitlab.com/bosi/decorder v0.2.1 // indirect + go.etcd.io/bbolt v1.3.6 // indirect + golang.org/x/mod v0.5.1 // indirect + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da // indirect + 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/ini.v1 v1.66.3 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + honnef.co/go/tools v0.2.2 // indirect + mvdan.cc/gofumpt v0.2.1 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect +) diff --git a/go.sum b/go.sum index aeb473e319..42cf54475d 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= +4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -7,6 +7,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= 
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -20,6 +21,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -28,6 +30,10 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -36,8 +42,8 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -49,16 +55,21 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.4 h1:lGSlI42Gm4bI1e+IITtXJXvxFM8N7naWimVFKcb0McY= -github.com/Antonboom/errname v0.1.4/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= +github.com/Antonboom/errname v0.1.5 h1:IM+A/gz0pDhKmlt5KSNTVAvfLMb+65RxavBXpRtCUEg= +github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= +github.com/Antonboom/nilnil v0.1.0 h1:DLDavmg0a6G/F4Lt9t7Enrbgb3Oph6LnDE6YVsmTt74= +github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 
h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -73,21 +84,20 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 
h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o= +github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/adlio/schema v1.1.14 h1:lIjyp5/2wSuEOmeQGNPpaRsVGZRqz9A/B+PaMtEotaU= -github.com/adlio/schema v1.1.14/go.mod h1:hQveFEMiDlG/M9yz9RAajnH5DzT6nAfqOG9YkEQU2pg= +github.com/adlio/schema v1.2.3 h1:GfKThfEsjS9cCz7gaF8zdXv4cpTdUqdljkKGDTbJjys= +github.com/adlio/schema v1.2.3/go.mod h1:nD7ZWmMMbwU12Pqwg+qL0rTvHBrBXfNz+5UQxTfy38M= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -98,7 +108,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= @@ -106,12 +117,13 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/ashanbrown/forbidigo v1.2.0 h1:RMlEFupPCxQ1IogYOQUnIQwGEUGK8g5vAPMRyJoSxbc= -github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde h1:YOsoVXsZQPA9aOTy1g0lAJv5VzZUvwQuZqug8XPeqfM= -github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/ashanbrown/forbidigo v1.3.0 
h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= +github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v1.1.0 h1:b2FVq4dTlBpy9f6qxhbyWH+6zy56IETE9cFbBGtDqs8= +github.com/ashanbrown/makezero v1.1.0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -126,12 +138,16 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blizzy78/varnamelen v0.5.0 h1:v9LpMwxzTqAJC4lsD/jR7zWb8a66trcqhTEH4Mk6Fio= +github.com/blizzy78/varnamelen v0.5.0/go.mod h1:Mc0nLBKI1/FP0Ga4kqMOgBig0eS5QtR107JnMAb1Wuc= github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/breml/bidichk v0.2.1 h1:SRNtZuLdfkxtocj+xyHXKC1Uv3jVi6EPYx+NHSTNQvE= +github.com/breml/bidichk v0.2.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= +github.com/breml/errchkjson v0.2.1 h1:QCToXnY9BNngrbJoW3qfCTt3BdtbnsI6wyP/WGrxxSE= +github.com/breml/errchkjson 
v0.2.1/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= @@ -146,6 +162,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= +github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/celestiaorg/go-leopard v0.1.0 h1:28z2EkvKJIez5J9CEaiiUEC+OxalRLtTGJJ1oScfE1g= github.com/celestiaorg/go-leopard v0.1.0/go.mod h1:NtO/rjlB8dw2aq7jr06vZFKGvryQcTDXaNHelmPNOAM= @@ -159,13 +177,14 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 
v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzFDVc9/j0= -github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= +github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= @@ -180,15 +199,19 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY= -github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= +github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= +github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -201,6 +224,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod 
h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -241,10 +265,12 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= +github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= @@ -256,8 +282,9 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+ne github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.12.0 
h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -266,18 +293,18 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= +github.com/fzipp/gocyclo v0.4.0 h1:IykTnjwh2YLyYkGa0y92iTTEQcnyAz0r9zOo15EbJ7k= +github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= 
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/go-critic/go-critic v0.5.6 h1:siUR1+322iVikWXoV75I1YRfNaC/yaLzhdF9Zwd8Tus= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= +github.com/go-critic/go-critic v0.6.2 h1:L5SDut1N4ZfsWZY0sH4DCrsHLHnhuuWak2wa165t9gs= +github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -291,30 +318,29 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= -github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc= +github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o= +github.com/go-toolsmith/pkgload 
v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM= github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= @@ -380,16 +406,16 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.42.1 h1:nC4WyrbdnNdohDVUoNKjy/4N4FTM1gCFaVeXecy6vzM= -github.com/golangci/golangci-lint v1.42.1/go.mod h1:MuInrVlgg2jq4do6XI1jbkErbVHVbwdrLLtGv6p2wPI= +github.com/golangci/golangci-lint v1.44.0 h1:YJPouGNQEdK+x2KsCpWMIBy0q6MSuxHjkWMxJMNj/DU= +github.com/golangci/golangci-lint v1.44.0/go.mod h1:aBolpzNkmYogKPynGKdOWDCEc8LlwnxZC6w/SJ1TaEs= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.3.5 
h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 h1:c9Mqqrm/Clj5biNaG7rABrmwUq88nHh0uABo2b/WYmc= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI= +github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -429,8 +455,10 @@ github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof 
v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -440,17 +468,18 @@ github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4Mgqvf github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC404yz7gQIfRZxX9/MLvQiqXyiBTJtgAy6yrI= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= 
+github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= +github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -463,15 +492,20 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1 h1:/7clKqrVfiVwiBQLM0Uke4KvXnO6JcCTS7HwF2D6wG8= github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 h1:rx8127mFPqXXsfPSo8BwnIU97MKFZc89WHAHt8PwDVY= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/comment v1.4.2 
h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -486,9 +520,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= 
+github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -497,6 +531,7 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -505,29 +540,30 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod 
h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -540,8 +576,8 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v1.1.0 h1:u6h4eiNuCLqk73Ic5TXQq9yZS+uEXTdusn7c3w1Mr6A= -github.com/jingyugao/rowserrcheck v1.1.0/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -552,6 +588,7 @@ github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork 
v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -566,8 +603,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d h1:XeSMXURZPtUffuWAaq90o6kLgZdgu+QA8wk4MPC8ikI= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -578,9 +615,8 @@ github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.10.7/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -589,51 +625,56 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0 h1:2Nx7XbdbE/BYZeoip2mURKUdtHQRuy6Ug+wR7K9ywNM= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= -github.com/kunwardeep/paralleltest v1.0.2/go.mod 
h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= +github.com/kulti/thelper v0.5.0 h1:CiEKStgoG4K9bjf/zk3eNX0D0J2iFWzxEY+h9UXmlJg= +github.com/kulti/thelper v0.5.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.3 h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI= +github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg= github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= -github.com/ldez/tagliatelle v0.2.0 h1:693V8Bf1NdShJ8eu/s84QySA0J2VWBanVBa2WwXD/Wk= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/ldez/tagliatelle v0.3.0 h1:Aubm2ZsrsjIGFvdxemMPJaXrSJ5Cys6VWyTQFt9k2dI= +github.com/ldez/tagliatelle v0.3.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= +github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg= -github.com/lib/pq 
v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ= github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 
h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -645,38 +686,36 @@ github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= +github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= 
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.1.1 h1:mkXNHP14Y6tfq+ocnQaiKEtgJDM41yaoyQq4qn6TD/4= -github.com/mgechev/revive v1.1.1/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJdbvozrY= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.3 h1:6tBZacs2/uv9UOpkBQhCtXh2NGgu2Ry97ZyjcN6uDCM= +github.com/mgechev/revive v1.1.3/go.mod h1:jMzDa13teAuv/KLeqgJw79NDe+1IT0ZO3Mht0vN1Yls= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= @@ -697,8 +736,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mwitkow/go-proto-validators v0.2.0/go.mod 
h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= @@ -709,8 +748,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.2.3 h1:+ANTMqRNrqwInnP9aszg/0jDo+zbXa4x66U19Bx/oTk= -github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= +github.com/nishanths/exhaustive v0.7.11 h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA= +github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= @@ -733,19 +772,22 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= 
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= +github.com/opencontainers/runc 
v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -753,40 +795,46 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= 
github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349 h1:Kq/3kL0k033ds3tyez5lFPrfQ74fNJ+OqCclRipubwA= -github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod 
h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b h1:/BDyEJWLnDUYKGWdlNx/82qSaVu2bUok/EvPUtIGuvw= +github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -798,8 +846,9 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.9.1/go.mod 
h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -811,14 +860,17 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4 h1:F6l5p6+7WBcTKS7foNQ4wqA39zjn2+RbdbyzGxIq1B0= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= +github.com/quasilyte/go-ruleguard v0.3.15 h1:iWYzp1z72IlXTioET0+XI6SjQdPfMGfuAiZiKznOt7g= +github.com/quasilyte/go-ruleguard v0.3.15/go.mod h1:NhuWhnlVEM1gT1A4VJHYfy9MuYSxxwHgxWoPsn9llB4= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod 
h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.12-0.20220101150716-969a394a9451/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.12/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.15/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 h1:P4QPNn+TK49zJjXKERt/vyPbv/mCHB/zQ4flDYOMN+M= +github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -827,35 +879,36 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqn github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= 
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= -github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= -github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II= -github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= +github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= +github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= -github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8= -github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q= +github.com/securego/gosec/v2 v2.9.6 h1:ysfvgQBp2zmTgXQl65UkqEkYlQGbnVSRUGpCrJiiR4c= +github.com/securego/gosec/v2 v2.9.6/go.mod h1:EESY9Ywxo/Zc5NyF/qIj6Cop+4PSWM0F0OfGD7FdIXc= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4= +github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon 
v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -865,6 +918,10 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sivchari/containedctx v1.0.1 h1:fJq44cX+tD+uT5xGrsg25GwiaY61NGybQk9WWKij3Uo= +github.com/sivchari/containedctx v1.0.1/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8= +github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= @@ -879,17 +936,18 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60= +github.com/spf13/afero v1.8.0/go.mod 
h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -899,13 +957,12 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= github.com/spf13/viper v1.9.0/go.mod 
h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= -github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -923,35 +980,39 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= +github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b 
h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= +github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ= -github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw= -github.com/tetafro/godot v1.4.9 h1:wsNd0RuUxISqqudFqchsSsMqsM188DoZVPBeKl87tP0= -github.com/tetafro/godot v1.4.9/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4= -github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E= +github.com/tendermint/tm-db v0.6.6 h1:EzhaOfR0bdKyATqcd5PNeyeq8r+V4bRPHBfyFdD9kGM= +github.com/tendermint/tm-db v0.6.6/go.mod h1:wP8d49A85B7/erz/r4YbKssKw6ylsO/hKtFk7E1aWZI= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11 
h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4= -github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU= +github.com/tomarrell/wrapcheck/v2 v2.4.0 h1:mU4H9KsqqPZUALOUbVOpjy8qNQbWLoLI9fV68/1tq30= +github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= -github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= +github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod 
h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg= @@ -961,11 +1022,11 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g= -github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90= +github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/vektra/mockery/v2 v2.10.0 h1:MiiQWxwdq7/ET6dCXLaJzSGEN17k758H7JHS9kOdiks= +github.com/vektra/mockery/v2 v2.10.0/go.mod h1:m/WO2UzWzqgVX3nvqpRQq70I4Z7jbSCRhdmkgtp+Ab4= 
github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= @@ -974,8 +1035,10 @@ github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0 h1:goWULN0jH5Yajmu/K+v1xCqIREeB+48OiJ2uu2ssc7U= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 h1:YAaOqqMTstELMMGblt6yJ/fcOt4owSYuw3IttMnKfAM= +github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= @@ -984,21 +1047,28 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= +gitlab.com/bosi/decorder v0.2.1 h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w= +gitlab.com/bosi/decorder v0.2.1/go.mod h1:6C/nhLSbF6qZbYD8bRmISBwc6vcWdNsiIBkRvjJFrH0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= 
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1044,11 +1114,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto 
v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1088,15 +1162,16 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1126,7 +1201,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1140,13 +1214,19 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b h1:MWaHNqZy3KTpuTMAGvv+Kw+ylsEpmyJZizz1dqxnu28= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1158,11 +1238,12 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1197,7 +1278,6 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1229,21 +1309,23 @@ golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1251,6 +1333,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1260,13 +1343,29 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1287,7 +1386,6 @@ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1306,7 +1404,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1316,7 +1413,6 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1336,7 +1432,6 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200323144430-8dcfad9e016e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -1373,17 +1468,21 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools 
v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da h1:Tno72dYE94v/7SyyIj9iBsc7OOjFu2PyNcl7yxxeZD8= +golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1415,13 +1514,18 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api 
v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1471,7 +1575,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1488,8 +1594,19 @@ google.golang.org/genproto 
v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1520,8 +1637,11 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1549,12 +1669,10 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod 
h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.3 h1:jRskFVxYaMGAMUbN0UZ7niA9gzL9B49DOqE78vg0k3w= +gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1582,16 +1700,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.1 h1:/EPr//+UMMXwMTkXvCCoaJDq8cpjMO80Ou+L4PDo2mY= -honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +mvdan.cc/gofumpt v0.2.1 h1:7jakRGkQcLAJdT+C8Bwc9d0BANkVPSkHZkzNv07pJAs= +mvdan.cc/gofumpt v0.2.1/go.mod h1:a/rvZPhsNaedOJBzqRD9omnwVwHZsBdJirXHa9Gh9Ig= mvdan.cc/interfacer 
v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 h1:HT3e4Krq+IE44tiN36RvVEb6tvqeIdtsVSsxmNPqlFU= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= +mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/blocksync/pool.go b/internal/blocksync/pool.go index d10a32b0d1..156640b35a 100644 --- a/internal/blocksync/pool.go +++ b/internal/blocksync/pool.go @@ -1,14 +1,15 @@ package blocksync import ( + "context" "errors" "fmt" "math" + "sync" "sync/atomic" "time" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" @@ -68,9 +69,11 @@ type BlockRequest struct { // BlockPool keeps track of the block sync peers, block requests and block responses. 
type BlockPool struct { service.BaseService + logger log.Logger + lastAdvance time.Time - mtx tmsync.RWMutex + mtx sync.RWMutex // block requests requesters map[int64]*bpRequester height int64 // the lowest key in requesters. @@ -83,6 +86,7 @@ type BlockPool struct { requestsCh chan<- BlockRequest errorsCh chan<- peerError + exitedCh chan struct{} startHeight int64 lastHundredBlockTimeStamp time.Time @@ -91,34 +95,48 @@ type BlockPool struct { // NewBlockPool returns a new BlockPool with the height equal to start. Block // requests and errors will be sent to requestsCh and errorsCh accordingly. -func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { - bp := &BlockPool{ - peers: make(map[types.NodeID]*bpPeer), - - requesters: make(map[int64]*bpRequester), - height: start, - startHeight: start, - numPending: 0, +func NewBlockPool( + logger log.Logger, + start int64, + requestsCh chan<- BlockRequest, + errorsCh chan<- peerError, +) *BlockPool { + bp := &BlockPool{ + logger: logger, + peers: make(map[types.NodeID]*bpPeer), + requesters: make(map[int64]*bpRequester), + height: start, + startHeight: start, + numPending: 0, + exitedCh: make(chan struct{}), requestsCh: requestsCh, errorsCh: errorsCh, lastSyncRate: 0, } - bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp) + bp.BaseService = *service.NewBaseService(logger, "BlockPool", bp) return bp } // OnStart implements service.Service by spawning requesters routine and recording // pool's start time. 
-func (pool *BlockPool) OnStart() error { +func (pool *BlockPool) OnStart(ctx context.Context) error { pool.lastAdvance = time.Now() pool.lastHundredBlockTimeStamp = pool.lastAdvance - go pool.makeRequestersRoutine() + go pool.makeRequestersRoutine(ctx) + + go func() { + defer close(pool.exitedCh) + pool.Wait() + }() + return nil } +func (*BlockPool) OnStop() {} + // spawns requesters as needed -func (pool *BlockPool) makeRequestersRoutine() { +func (pool *BlockPool) makeRequestersRoutine(ctx context.Context) { for { if !pool.IsRunning() { break @@ -138,7 +156,7 @@ func (pool *BlockPool) makeRequestersRoutine() { pool.removeTimedoutPeers() default: // request for more blocks. - pool.makeNextRequester() + pool.makeNextRequester(ctx) } } } @@ -155,7 +173,7 @@ func (pool *BlockPool) removeTimedoutPeers() { if curRate != 0 && curRate < minRecvRate { err := errors.New("peer is not sending us data fast enough") pool.sendError(err, peer.id) - pool.Logger.Error("SendTimeout", "peer", peer.id, + pool.logger.Error("SendTimeout", "peer", peer.id, "reason", err, "curRate", fmt.Sprintf("%d KB/s", curRate/1024), "minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024)) @@ -218,7 +236,7 @@ func (pool *BlockPool) PopRequest() { if r := pool.requesters[pool.height]; r != nil { if err := r.Stop(); err != nil { - pool.Logger.Error("Error stopping requester", "err", err) + pool.logger.Error("error stopping requester", "err", err) } delete(pool.requesters, pool.height) pool.height++ @@ -265,7 +283,7 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi requester := pool.requesters[block.Height] if requester == nil { - pool.Logger.Error("peer sent us a block we didn't expect", + pool.logger.Error("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) diff := pool.height - block.Height if diff < 0 { @@ -285,7 +303,7 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi } } else 
{ err := errors.New("requester is different or block already exists") - pool.Logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) + pool.logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) pool.sendError(err, peerID) } } @@ -316,7 +334,7 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6 peer.height = height } else { peer = newBPPeer(pool, peerID, base, height) - peer.setLogger(pool.Logger.With("peer", peerID)) + peer.logger = pool.logger.With("peer", peerID) pool.peers[peerID] = peer } @@ -391,7 +409,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { return nil } -func (pool *BlockPool) makeNextRequester() { +func (pool *BlockPool) makeNextRequester(ctx context.Context) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -400,14 +418,14 @@ func (pool *BlockPool) makeNextRequester() { return } - request := newBPRequester(pool, nextHeight) + request := newBPRequester(pool.logger, pool, nextHeight) pool.requesters[nextHeight] = request atomic.AddInt32(&pool.numPending, 1) - err := request.Start() + err := request.Start(ctx) if err != nil { - request.Logger.Error("Error starting request", "err", err) + request.logger.Error("error starting request", "err", err) } } @@ -471,7 +489,7 @@ type bpPeer struct { base int64 pool *BlockPool id types.NodeID - recvMonitor *flow.Monitor + recvMonitor *flowrate.Monitor timeout *time.Timer @@ -490,12 +508,8 @@ func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) * return peer } -func (peer *bpPeer) setLogger(l log.Logger) { - peer.logger = l -} - func (peer *bpPeer) resetMonitor() { - peer.recvMonitor = flow.New(time.Second, time.Second*40) + peer.recvMonitor = flowrate.New(time.Second, time.Second*40) initialValue := float64(minRecvRate) * math.E peer.recvMonitor.SetREMA(initialValue) } @@ -540,18 +554,20 @@ func (peer *bpPeer) onTimeout() { type 
bpRequester struct { service.BaseService + logger log.Logger pool *BlockPool height int64 gotBlockCh chan struct{} redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat - mtx tmsync.Mutex + mtx sync.Mutex peerID types.NodeID block *types.Block } -func newBPRequester(pool *BlockPool, height int64) *bpRequester { +func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { bpr := &bpRequester{ + logger: pool.logger, pool: pool, height: height, gotBlockCh: make(chan struct{}, 1), @@ -560,15 +576,17 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester { peerID: "", block: nil, } - bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr) + bpr.BaseService = *service.NewBaseService(logger, "bpRequester", bpr) return bpr } -func (bpr *bpRequester) OnStart() error { - go bpr.requestRoutine() +func (bpr *bpRequester) OnStart(ctx context.Context) error { + go bpr.requestRoutine(ctx) return nil } +func (*bpRequester) OnStop() {} + // Returns true if the peer matches and block doesn't already exist. func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool { bpr.mtx.Lock() @@ -623,7 +641,13 @@ func (bpr *bpRequester) redo(peerID types.NodeID) { // Responsible for making more requests as necessary // Returns only when a block is found (e.g. AddBlock() is called) -func (bpr *bpRequester) requestRoutine() { +func (bpr *bpRequester) requestRoutine(ctx context.Context) { + bprPoolDone := make(chan struct{}) + go func() { + defer close(bprPoolDone) + bpr.pool.Wait() + }() + OUTER_LOOP: for { // Pick a peer to send request to. 
@@ -649,13 +673,13 @@ OUTER_LOOP: WAIT_LOOP: for { select { - case <-bpr.pool.Quit(): + case <-ctx.Done(): + return + case <-bpr.pool.exitedCh: if err := bpr.Stop(); err != nil { - bpr.Logger.Error("Error stopped requester", "err", err) + bpr.logger.Error("error stopped requester", "err", err) } return - case <-bpr.Quit(): - return case peerID := <-bpr.redoCh: if peerID == bpr.peerID { bpr.reset() diff --git a/internal/blocksync/pool_test.go b/internal/blocksync/pool_test.go index cbe19acbe4..0718fee164 100644 --- a/internal/blocksync/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -1,6 +1,7 @@ package blocksync import ( + "context" "fmt" mrand "math/rand" "testing" @@ -78,23 +79,20 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { } func TestBlockPoolBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) + pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) - err := pool.Start() - if err != nil { + if err := pool.Start(ctx); err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) peers.start() defer peers.stop() @@ -138,21 +136,19 @@ func TestBlockPoolBasic(t *testing.T) { } func TestBlockPoolTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() + pool := NewBlockPool(log.TestingLogger(), start, requestsCh, errorsCh) + err := pool.Start(ctx) if err != nil { 
t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) for _, peer := range peers { t.Logf("Peer %v", peer.id) @@ -201,6 +197,9 @@ func TestBlockPoolTimeout(t *testing.T) { } func TestBlockPoolRemovePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peers := make(testPeers, 10) for i := 0; i < 10; i++ { peerID := types.NodeID(fmt.Sprintf("%d", i+1)) @@ -210,15 +209,10 @@ func TestBlockPoolRemovePeer(t *testing.T) { requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) - pool := NewBlockPool(1, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() + pool := NewBlockPool(log.TestingLogger(), 1, requestsCh, errorsCh) + err := pool.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) // add peers for peerID, peer := range peers { diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index 43c3e83cd1..841deb8499 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -1,18 +1,21 @@ package blocksync import ( + "context" + "errors" "fmt" "runtime/debug" "sync" + "sync/atomic" "time" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/types" ) @@ -49,7 +52,7 @@ func GetChannelDescriptor() *p2p.ChannelDescriptor { type consensusReactor interface { // For when we switch from block sync 
reactor to the consensus // machine. - SwitchToConsensus(state sm.State, skipWAL bool) + SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) } type peerError struct { @@ -64,6 +67,7 @@ func (e peerError) Error() string { // Reactor handles long-term catchup syncing. type Reactor struct { service.BaseService + logger log.Logger // immutable initialState sm.State @@ -72,7 +76,7 @@ type Reactor struct { store *store.BlockStore pool *BlockPool consReactor consensusReactor - blockSync *tmsync.AtomicBool + blockSync *atomicBool blockSyncCh *p2p.Channel // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope @@ -84,7 +88,6 @@ type Reactor struct { // blockSyncCh.Out. blockSyncOutBridgeCh chan p2p.Envelope peerUpdates *p2p.PeerUpdates - closeCh chan struct{} requestsCh <-chan BlockRequest errorsCh <-chan peerError @@ -94,23 +97,27 @@ type Reactor struct { // stopping the p2p Channel(s). poolWG sync.WaitGroup - metrics *consensus.Metrics + metrics *consensus.Metrics + eventBus *eventbus.EventBus syncStartTime time.Time } // NewReactor returns new reactor instance. func NewReactor( + ctx context.Context, logger log.Logger, state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, consReactor consensusReactor, - blockSyncCh *p2p.Channel, + channelCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, blockSync bool, metrics *consensus.Metrics, + eventBus *eventbus.EventBus, ) (*Reactor, error) { + if state.LastBlockHeight != store.Height() { return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()) } @@ -123,20 +130,26 @@ func NewReactor( requestsCh := make(chan BlockRequest, maxTotalRequesters) errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. 
+ blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor()) + if err != nil { + return nil, err + } + r := &Reactor{ + logger: logger, initialState: state, blockExec: blockExec, store: store, - pool: NewBlockPool(startHeight, requestsCh, errorsCh), + pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh), consReactor: consReactor, - blockSync: tmsync.NewBool(blockSync), + blockSync: newAtomicBool(blockSync), requestsCh: requestsCh, errorsCh: errorsCh, blockSyncCh: blockSyncCh, blockSyncOutBridgeCh: make(chan p2p.Envelope), peerUpdates: peerUpdates, - closeCh: make(chan struct{}), metrics: metrics, + eventBus: eventBus, syncStartTime: time.Time{}, } @@ -151,20 +164,21 @@ func NewReactor( // // If blockSync is enabled, we also start the pool and the pool processing // goroutine. If the pool fails to start, an error is returned. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { if r.blockSync.IsSet() { - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } r.poolWG.Add(1) - go r.requestRoutine() + go r.requestRoutine(ctx) r.poolWG.Add(1) - go r.poolRoutine(false) + go r.poolRoutine(ctx, false) } - go r.processBlockSyncCh() - go r.processPeerUpdates() + go r.processBlockSyncCh(ctx) + go r.processBlockSyncBridge(ctx) + go r.processPeerUpdates(ctx) return nil } @@ -174,60 +188,48 @@ func (r *Reactor) OnStart() error { func (r *Reactor) OnStop() { if r.blockSync.IsSet() { if err := r.pool.Stop(); err != nil { - r.Logger.Error("failed to stop pool", "err", err) + r.logger.Error("failed to stop pool", "err", err) } } // wait for the poolRoutine and requestRoutine goroutines to gracefully exit r.poolWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. 
This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.blockSyncCh.Done() - <-r.peerUpdates.Done() } // respondToPeer loads a block and sends it to the requesting peer, if we have it. // Otherwise, we'll respond saying we do not have it. -func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) { +func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID) error { block := r.store.LoadBlock(msg.Height) if block != nil { blockProto, err := block.ToProto() if err != nil { - r.Logger.Error("failed to convert msg to protobuf", "err", err) - return + r.logger.Error("failed to convert msg to protobuf", "err", err) + return err } - r.blockSyncCh.Out <- p2p.Envelope{ + return r.blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.BlockResponse{Block: blockProto}, - } - - return + }) } - r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) - r.blockSyncCh.Out <- p2p.Envelope{ + r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) + + return r.blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.NoBlockResponse{Height: msg.Height}, - } + }) } // handleBlockSyncMessage handles envelopes sent from peers on the // BlockSyncChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
-func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleBlockSyncMessage(ctx context.Context, envelope *p2p.Envelope) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *bcproto.BlockRequest: - r.respondToPeer(msg, envelope.From) - + return r.respondToPeer(ctx, msg, envelope.From) case *bcproto.BlockResponse: block, err := types.BlockFromProto(msg.Block) if err != nil { @@ -238,14 +240,13 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { r.pool.AddBlock(envelope.From, block, block.Size()) case *bcproto.StatusRequest: - r.blockSyncCh.Out <- p2p.Envelope{ + return r.blockSyncCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &bcproto.StatusResponse{ Height: r.store.Height(), Base: r.store.Base(), }, - } - + }) case *bcproto.StatusResponse: r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height) @@ -262,11 +263,11 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -274,11 +275,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) + r.logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) switch chID { case BlockSyncChannel: - err = r.handleBlockSyncMessage(envelope) + err = r.handleBlockSyncMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -292,34 +293,42 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // message execution will result in a PeerError being sent on the BlockSyncChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
-func (r *Reactor) processBlockSyncCh() { - defer r.blockSyncCh.Close() - - for { - select { - case envelope := <-r.blockSyncCh.In: - if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) - r.blockSyncCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processBlockSyncCh(ctx context.Context) { + iter := r.blockSyncCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.blockSyncCh.ID, envelope); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return } - case envelope := <-r.blockSyncOutBridgeCh: - r.blockSyncCh.Out <- envelope + r.logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return + } + } + } +} - case <-r.closeCh: - r.Logger.Debug("stopped listening on block sync channel; closing...") +func (r *Reactor) processBlockSyncBridge(ctx context.Context) { + for { + select { + case <-ctx.Done(): return - + case envelope := <-r.blockSyncOutBridgeCh: + if err := r.blockSyncCh.Send(ctx, envelope); err != nil { + return + } } } } // processPeerUpdate processes a PeerUpdate. func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) // XXX: Pool#RedoRequest can sometimes give us an empty peer. if len(peerUpdate.NodeID) == 0 { @@ -345,44 +354,40 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. 
When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { + case <-ctx.Done(): + return case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") - return } } } // SwitchToBlockSync is called by the state sync reactor when switching to fast // sync. -func (r *Reactor) SwitchToBlockSync(state sm.State) error { +func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error { r.blockSync.Set() r.initialState = state r.pool.height = state.LastBlockHeight + 1 - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } r.syncStartTime = time.Now() r.poolWG.Add(1) - go r.requestRoutine() + go r.requestRoutine(ctx) r.poolWG.Add(1) - go r.poolRoutine(true) + go r.poolRoutine(ctx, true) return nil } -func (r *Reactor) requestRoutine() { +func (r *Reactor) requestRoutine(ctx context.Context) { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() @@ -390,33 +395,32 @@ func (r *Reactor) requestRoutine() { for { select { - case <-r.closeCh: + case <-ctx.Done(): return - - case <-r.pool.Quit(): - return - case request := <-r.requestsCh: r.blockSyncOutBridgeCh <- p2p.Envelope{ To: request.PeerID, Message: &bcproto.BlockRequest{Height: request.Height}, } - case pErr := <-r.errorsCh: - r.blockSyncCh.Error <- p2p.PeerError{ + if err := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: pErr.peerID, Err: pErr.err, + }); err != nil { + return } - case <-statusUpdateTicker.C: r.poolWG.Add(1) go func() { defer r.poolWG.Done() - r.blockSyncOutBridgeCh <- p2p.Envelope{ + select { + case r.blockSyncOutBridgeCh <- p2p.Envelope{ Broadcast: true, Message: 
&bcproto.StatusRequest{}, + }: + case <-ctx.Done(): } }() } @@ -427,7 +431,7 @@ func (r *Reactor) requestRoutine() { // do. // // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! -func (r *Reactor) poolRoutine(stateSynced bool) { +func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool) { var ( trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond) switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second) @@ -457,7 +461,7 @@ FOR_LOOP: lastAdvance = r.pool.LastAdvance() ) - r.Logger.Debug( + r.logger.Debug( "consensus ticker", "num_pending", numPending, "total", lenRequesters, @@ -466,13 +470,13 @@ FOR_LOOP: switch { case r.pool.IsCaughtUp(): - r.Logger.Info("switching to consensus reactor", "height", height) + r.logger.Info("switching to consensus reactor", "height", height) case time.Since(lastAdvance) > syncTimeout: - r.Logger.Error("no progress since last advance", "last_advance", lastAdvance) + r.logger.Error("no progress since last advance", "last_advance", lastAdvance) default: - r.Logger.Info( + r.logger.Info( "not caught up yet", "height", height, "max_peer_height", r.pool.MaxPeerHeight(), @@ -482,13 +486,13 @@ FOR_LOOP: } if err := r.pool.Stop(); err != nil { - r.Logger.Error("failed to stop pool", "err", err) + r.logger.Error("failed to stop pool", "err", err) } r.blockSync.UnSet() if r.consReactor != nil { - r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) + r.consReactor.SwitchToConsensus(ctx, state, blocksSynced > 0 || stateSynced) } break FOR_LOOP @@ -519,8 +523,15 @@ FOR_LOOP: didProcessCh <- struct{}{} } + firstParts, err := first.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + r.logger.Error("failed to make ", + "height", first.Height, + "err", err.Error()) + break FOR_LOOP + } + var ( - firstParts = first.MakePartSet(types.BlockPartSizeBytes) firstPartSetHeader = firstParts.Header() firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: 
firstPartSetHeader} ) @@ -530,10 +541,9 @@ FOR_LOOP: // NOTE: We can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit) - if err != nil { + if err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit); err != nil { err = fmt.Errorf("invalid last commit: %w", err) - r.Logger.Error( + r.logger.Error( err.Error(), "last_commit", second.LastCommit, "block_id", firstID, @@ -543,16 +553,20 @@ FOR_LOOP: // NOTE: We've already removed the peer's request, but we still need // to clean up the rest. peerID := r.pool.RedoRequest(first.Height) - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: peerID, Err: err, + }); serr != nil { + break FOR_LOOP } peerID2 := r.pool.RedoRequest(second.Height) if peerID2 != peerID { - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := r.blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: peerID2, Err: err, + }); serr != nil { + break FOR_LOOP } } @@ -567,7 +581,7 @@ FOR_LOOP: // TODO: Same thing for app - but we would need a way to get the hash // without persisting the state. - state, err = r.blockExec.ApplyBlock(state, firstID, first) + state, err = r.blockExec.ApplyBlock(ctx, state, firstID, first) if err != nil { // TODO: This is bad, are we zombie? 
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) @@ -579,7 +593,7 @@ FOR_LOOP: if blocksSynced%100 == 0 { lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - r.Logger.Info( + r.logger.Info( "block sync rate", "height", r.pool.height, "max_peer_height", r.pool.MaxPeerHeight(), @@ -592,9 +606,9 @@ FOR_LOOP: continue FOR_LOOP - case <-r.closeCh: + case <-ctx.Done(): break FOR_LOOP - case <-r.pool.Quit(): + case <-r.pool.exitedCh: break FOR_LOOP } } @@ -627,3 +641,32 @@ func (r *Reactor) GetRemainingSyncTime() time.Duration { return time.Duration(int64(remain * float64(time.Second))) } + +func (r *Reactor) PublishStatus(ctx context.Context, event types.EventDataBlockSyncStatus) error { + if r.eventBus == nil { + return errors.New("event bus is not configured") + } + return r.eventBus.PublishEventBlockSyncStatus(ctx, event) +} + +// atomicBool is an atomic Boolean, safe for concurrent use by multiple +// goroutines. +type atomicBool int32 + +// newAtomicBool creates an atomicBool with given initial value. +func newAtomicBool(ok bool) *atomicBool { + ab := new(atomicBool) + if ok { + ab.Set() + } + return ab +} + +// Set sets the Boolean to true. +func (ab *atomicBool) Set() { atomic.StoreInt32((*int32)(ab), 1) } + +// UnSet sets the Boolean to false. +func (ab *atomicBool) UnSet() { atomic.StoreInt32((*int32)(ab), 0) } + +// IsSet returns whether the Boolean is true. 
+func (ab *atomicBool) IsSet() bool { return atomic.LoadInt32((*int32)(ab))&1 == 1 } diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 6bca8d4a92..25814a2ea1 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -1,6 +1,7 @@ package blocksync import ( + "context" "os" "testing" "time" @@ -41,6 +42,7 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -49,13 +51,16 @@ func setup( ) *reactorTestSuite { t.Helper() + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + numNodes := len(maxBlockHeights) require.True(t, numNodes >= 1, "must specify at least one block height (nodes)") rts := &reactorTestSuite{ logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), app: make(map[types.NodeID]proxy.AppConns, numNodes), @@ -66,21 +71,21 @@ func setup( } chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} - rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) + rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc) i := 0 for nodeID := range rts.network.Nodes { - rts.addNode(t, nodeID, genDoc, privVal, maxBlockHeights[i]) + rts.addNode(ctx, t, nodeID, genDoc, privVal, maxBlockHeights[i]) i++ } t.Cleanup(func() { + cancel() for _, nodeID := range rts.nodes { - rts.peerUpdates[nodeID].Close() - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.NoError(t, rts.app[nodeID].Stop()) + rts.reactors[nodeID].Wait() + rts.app[nodeID].Wait() + require.False(t, rts.reactors[nodeID].IsRunning()) } } @@ -89,7 
+94,9 @@ func setup( return rts } -func (rts *reactorTestSuite) addNode(t *testing.T, +func (rts *reactorTestSuite) addNode( + ctx context.Context, + t *testing.T, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -97,9 +104,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) { t.Helper() + logger := log.TestingLogger() + rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics()) - require.NoError(t, rts.app[nodeID].Start()) + rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, rts.app[nodeID].Start(ctx)) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() @@ -127,6 +136,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, lastBlock := blockStore.LoadBlock(blockHeight - 1) vote, err := factory.MakeVote( + ctx, privVal, lastBlock.Header.ChainID, 0, lastBlock.Header.Height, 0, 2, @@ -134,7 +144,6 @@ func (rts *reactorTestSuite) addNode(t *testing.T, time.Now(), ) require.NoError(t, err) - lastCommit = types.NewCommit( vote.Height, vote.Round, @@ -143,11 +152,13 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) } - thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) + thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit) + require.NoError(t, err) + thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, err = blockExec.ApplyBlock(ctx, state, blockID, thisBlock) require.NoError(t, err) blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -155,26 +166,33 @@ func (rts *reactorTestSuite) addNode(t *testing.T, rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) rts.peerUpdates[nodeID] = 
p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) + + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.blockSyncChannels[nodeID], nil + } rts.reactors[nodeID], err = NewReactor( + ctx, rts.logger.With("nodeID", nodeID), state.Copy(), blockExec, blockStore, nil, - rts.blockSyncChannels[nodeID], + chCreator, rts.peerUpdates[nodeID], rts.blockSync, - consensus.NopMetrics()) + consensus.NopMetrics(), + nil, // eventbus, can be nil + ) require.NoError(t, err) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) } -func (rts *reactorTestSuite) start(t *testing.T) { +func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { t.Helper() - rts.network.Start(t) + rts.network.Start(ctx, t) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), len(rts.nodes)-1, @@ -182,17 +200,22 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { - cfg := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) maxBlockHeight := int64(64) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) secondaryPool := rts.reactors[rts.nodes[1]].pool @@ 
-213,19 +236,24 @@ func TestReactor_AbruptDisconnect(t *testing.T) { Status: p2p.PeerStatusDown, NodeID: rts.nodes[0], } - rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0]) + rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(ctx, rts.nodes[0]) } func TestReactor_SyncTime(t *testing.T) { - cfg := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) maxBlockHeight := int64(101) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) require.Eventually( t, @@ -240,17 +268,22 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { - cfg := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) maxBlockHeight := int64(65) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) testCases := []struct { height int64 @@ -287,17 +320,22 @@ func 
TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() - cfg := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("block_sync_reactor_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) maxBlockHeight := int64(48) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) require.Eventually( t, @@ -325,12 +363,13 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // // XXX: This causes a potential race condition. 
// See: https://github.com/tendermint/tendermint/issues/6005 - otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30) - newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{ + valSet, otherPrivVals := factory.ValidatorSet(ctx, t, 1, 30) + otherGenDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) + newNode := rts.network.MakeNode(ctx, t, p2ptest.NodeOptions{ MaxPeers: uint16(len(rts.nodes) + 1), MaxConnected: uint16(len(rts.nodes) + 1), }) - rts.addNode(t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) + rts.addNode(ctx, t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) // add a fake peer just so we do not wait for the consensus ticker to timeout rts.reactors[newNode.NodeID].pool.SetPeerRange("00ff", 10, 10) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index a826ef79bc..6fad4e1638 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -7,6 +7,7 @@ import ( "path" "sync" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,14 +15,15 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/libs/time" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" 
"github.com/tendermint/tendermint/types" @@ -30,6 +32,13 @@ import ( // Byzantine node sends two different prevotes (nil and blockID) to the same // validator. func TestByzantinePrevoteEquivocation(t *testing.T) { + // empirically, this test either passes in <1s or hits some + // kind of deadlock and hit the larger timeout. This timeout + // can be extended a bunch if needed, but it's good to avoid + // falling back to a much coarser timeout + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + config := configSetup(t) nValidators := 4 @@ -38,7 +47,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { tickerFunc := newMockTickerFunc(true) appFunc := newKVStore - genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30) + genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil) states := make([]*State, nValidators) for i := 0; i < nValidators; i++ { @@ -50,11 +60,13 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { require.NoError(t, err) require.NoError(t, stateStore.Save(state)) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, err) + defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() + ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + app := appFunc(t, logger) vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) @@ -62,13 +74,17 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + mtx := new(sync.Mutex) + proxyAppConnMem := 
abciclient.NewLocalClient(logger, mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + mempool := mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -80,26 +96,23 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // Make State blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(cs.Logger) + cs := NewState(ctx, logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) // set private validator pv := privVals[i] - cs.SetPrivValidator(pv) + cs.SetPrivValidator(ctx, pv) - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() + eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events")) + err = eventBus.Start(ctx) require.NoError(t, err) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger) states[i] = cs }() } - rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nValidators, states, 100) // buffer must be large enough to not deadlock var bzNodeID types.NodeID @@ -116,45 +129,44 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { bzReactor := rts.reactors[bzNodeID] // alter prevote so that the byzantine node double votes when height is 2 - bzNodeState.doPrevote = func(height int64, round int32) { + bzNodeState.doPrevote = func(ctx context.Context, height int64, round int32) { // allow first height to happen normally so that byzantine validator is no longer proposer if height == 
prevoteHeight { - prevote1, err := bzNodeState.signVote( + prevote1, err := bzNodeState.signVote(ctx, tmproto.PrevoteType, bzNodeState.ProposalBlock.Hash(), bzNodeState.ProposalBlockParts.Header(), ) require.NoError(t, err) - prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + prevote2, err := bzNodeState.signVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) require.NoError(t, err) // send two votes to all peers (1st to one half, 2nd to another half) i := 0 for _, ps := range bzReactor.peers { if i < len(bzReactor.peers)/2 { - bzNodeState.Logger.Info("signed and pushed vote", "vote", prevote1, "peer", ps.peerID) - bzReactor.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: prevote1.ToProto(), - }, - } + require.NoError(t, bzReactor.voteCh.Send(ctx, + p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: prevote1.ToProto(), + }, + })) } else { - bzNodeState.Logger.Info("signed and pushed vote", "vote", prevote2, "peer", ps.peerID) - bzReactor.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: prevote2.ToProto(), - }, - } + require.NoError(t, bzReactor.voteCh.Send(ctx, + p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: prevote2.ToProto(), + }, + })) } i++ } } else { - bzNodeState.Logger.Info("behaving normally") - bzNodeState.defaultDoPrevote(height, round) + bzNodeState.defaultDoPrevote(ctx, height, round) } } @@ -164,11 +176,11 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // lazyProposer := states[1] lazyNodeState := states[1] - lazyNodeState.decideProposal = func(height int64, round int32) { - lazyNodeState.Logger.Info("Lazy Proposer proposing condensed commit") + lazyNodeState.decideProposal = func(ctx context.Context, height int64, round int32) { require.NotNil(t, lazyNodeState.privValidator) var commit *types.Commit + var votes []*types.Vote switch { case lazyNodeState.Height == lazyNodeState.state.InitialHeight: // We're 
creating a proposal for the first block. @@ -177,8 +189,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { case lazyNodeState.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = lazyNodeState.LastCommit.MakeCommit() + votes = lazyNodeState.LastCommit.GetVotes() default: // This shouldn't happen. - lazyNodeState.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") + lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return } @@ -188,68 +201,74 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { if lazyNodeState.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - lazyNodeState.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet)) + lazyNodeState.logger.Error("enterPropose", "err", errPubKeyIsNotSet) return } proposerAddr := lazyNodeState.privValidatorPubKey.Address() - block, blockParts := lazyNodeState.blockExec.CreateProposalBlock( - lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, + block, blockParts, err := lazyNodeState.blockExec.CreateProposalBlock( + lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr, votes, ) + require.NoError(t, err) // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, // and the privValidator will refuse to sign anything. 
if err := lazyNodeState.wal.FlushAndSync(); err != nil { - lazyNodeState.Logger.Error("Error flushing to disk") + lazyNodeState.logger.Error("error flushing to disk") } // Make proposal propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID) + proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID, block.Header.Time) p := proposal.ToProto() - if err := lazyNodeState.privValidator.SignProposal(context.Background(), lazyNodeState.state.ChainID, p); err == nil { + if err := lazyNodeState.privValidator.SignProposal(ctx, lazyNodeState.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue - lazyNodeState.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + lazyNodeState.sendInternalMessage(ctx, msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}) for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - lazyNodeState.sendInternalMessage(msgInfo{&BlockPartMessage{lazyNodeState.Height, lazyNodeState.Round, part}, ""}) + lazyNodeState.sendInternalMessage(ctx, msgInfo{&BlockPartMessage{ + lazyNodeState.Height, lazyNodeState.Round, part, + }, "", tmtime.Now()}) } - lazyNodeState.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) - lazyNodeState.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) } else if !lazyNodeState.replayMode { - lazyNodeState.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) + lazyNodeState.logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) } } for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // Evidence should be submitted and committed at the third height but 
// we will check the first six just in case evidenceFromEachValidator := make([]types.Evidence, nValidators) - wg := new(sync.WaitGroup) + var wg sync.WaitGroup i := 0 for _, sub := range rts.subs { wg.Add(1) - go func(j int, s types.Subscription) { + go func(j int, s eventbus.Subscription) { defer wg.Done() for { - select { - case msg := <-s.Out(): - require.NotNil(t, msg) - block := msg.Data().(types.EventDataNewBlock).Block - if len(block.Evidence.Evidence) != 0 { - evidenceFromEachValidator[j] = block.Evidence.Evidence[0] - return - } - case <-s.Canceled(): - require.Fail(t, "subscription failed for %d", j) + if ctx.Err() != nil { + return + } + + msg, err := s.Next(ctx) + assert.NoError(t, err) + if err != nil { + cancel() + return + } + + require.NotNil(t, msg) + block := msg.Data().(types.EventDataNewBlock).Block + if len(block.Evidence.Evidence) != 0 { + evidenceFromEachValidator[j] = block.Evidence.Evidence[0] return } } @@ -260,16 +279,15 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { wg.Wait() - pubkey, err := bzNodeState.privValidator.GetPubKey(context.Background()) + pubkey, err := bzNodeState.privValidator.GetPubKey(ctx) require.NoError(t, err) for idx, ev := range evidenceFromEachValidator { - if assert.NotNil(t, ev, idx) { - ev, ok := ev.(*types.DuplicateVoteEvidence) - assert.True(t, ok) - assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress) - assert.Equal(t, prevoteHeight, ev.Height()) - } + require.NotNil(t, ev, idx) + ev, ok := ev.(*types.DuplicateVoteEvidence) + require.True(t, ok) + assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress) + assert.Equal(t, prevoteHeight, ev.Height()) } } @@ -291,7 +309,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // // give the byzantine validator a normal ticker // ticker := NewTimeoutTicker() - // ticker.SetLogger(states[0].Logger) + // ticker.SetLogger(states[0].logger) // states[0].SetTimeoutTicker(ticker) // p2pLogger := logger.With("module", "p2p") @@ 
-306,7 +324,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // eventBus.SetLogger(logger.With("module", "events", "validator", i)) // var err error - // blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + // blocksSubs[i], err = eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock) // require.NoError(t, err) // conR := NewReactor(states[i], true) // so we don't start the consensus states @@ -517,7 +535,7 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { // } // // Create peerState for peer -// peerState := NewPeerState(peer).SetLogger(br.reactor.Logger) +// peerState := NewPeerState(peer).SetLogger(br.reactor.logger) // peer.Set(types.PeerStateKey, peerState) // // Send our state to peer. diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 8b54f6026d..72a6bb1a59 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -3,12 +3,12 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" - "io/ioutil" "os" - "path" "path/filepath" + "sort" "sync" "testing" "time" @@ -21,15 +21,15 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/mempool" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - tmpubsub 
"github.com/tendermint/tendermint/libs/pubsub" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -50,30 +50,35 @@ type cleanupFunc func() func configSetup(t *testing.T) *config.Config { t.Helper() - cfg := ResetConfig("consensus_reactor_test") + cfg, err := ResetConfig("consensus_reactor_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) - consensusReplayConfig := ResetConfig("consensus_replay_test") - configStateTest := ResetConfig("consensus_state_test") - configMempoolTest := ResetConfig("consensus_mempool_test") - configByzantineTest := ResetConfig("consensus_byzantine_test") + consensusReplayConfig, err := ResetConfig("consensus_replay_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(consensusReplayConfig.RootDir) }) + + configStateTest, err := ResetConfig("consensus_state_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(configStateTest.RootDir) }) + + configMempoolTest, err := ResetConfig("consensus_mempool_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(configMempoolTest.RootDir) }) + + configByzantineTest, err := ResetConfig("consensus_byzantine_test") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) }) - t.Cleanup(func() { - os.RemoveAll(cfg.RootDir) - os.RemoveAll(consensusReplayConfig.RootDir) - os.RemoveAll(configStateTest.RootDir) - os.RemoveAll(configMempoolTest.RootDir) - os.RemoveAll(configByzantineTest.RootDir) - }) return cfg } -func ensureDir(dir string, mode os.FileMode) { - if err := tmos.EnsureDir(dir, mode); err != nil { - panic(err) - } +func ensureDir(t *testing.T, dir string, mode os.FileMode) { + t.Helper() + require.NoError(t, tmos.EnsureDir(dir, mode)) } -func ResetConfig(name string) *config.Config { +func ResetConfig(name string) (*config.Config, error) { return config.ResetTestRoot(name) } @@ 
-84,6 +89,7 @@ type validatorStub struct { Index int32 // Validator index. NOTE: we don't assume validator set changes. Height int64 Round int32 + clock tmtime.Source types.PrivValidator VotingPower int64 lastVote *types.Vote @@ -96,16 +102,17 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida Index: valIndex, PrivValidator: privValidator, VotingPower: testMinPower, + clock: tmtime.DefaultSource{}, } } func (vs *validatorStub) signVote( - cfg *config.Config, + ctx context.Context, voteType tmproto.SignedMsgType, - hash []byte, - header types.PartSetHeader) (*types.Vote, error) { + chainID string, + blockID types.BlockID) (*types.Vote, error) { - pubKey, err := vs.PrivValidator.GetPubKey(context.Background()) + pubKey, err := vs.PrivValidator.GetPubKey(ctx) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } @@ -115,12 +122,13 @@ func (vs *validatorStub) signVote( ValidatorAddress: pubKey.Address(), Height: vs.Height, Round: vs.Round, - Timestamp: tmtime.Now(), + Timestamp: vs.clock.Now(), Type: voteType, - BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, + BlockID: blockID, + VoteExtension: types.VoteExtensionFromProto(kvstore.ConstructVoteExtension(pubKey.Address())), } v := vote.ToProto() - if err := vs.PrivValidator.SignVote(context.Background(), cfg.ChainID(), v); err != nil { + if err := vs.PrivValidator.SignVote(ctx, chainID, v); err != nil { return nil, fmt.Errorf("sign vote failed: %w", err) } @@ -138,16 +146,19 @@ func (vs *validatorStub) signVote( // Sign vote for type/hash/header func signVote( + ctx context.Context, + t *testing.T, vs *validatorStub, - cfg *config.Config, voteType tmproto.SignedMsgType, - hash []byte, - header types.PartSetHeader) *types.Vote { + chainID string, + blockID types.BlockID) *types.Vote { - v, err := vs.signVote(cfg, voteType, hash, header) - if err != nil { - panic(fmt.Errorf("failed to sign vote: %v", err)) - } + v, err := vs.signVote(ctx, voteType, chainID, 
blockID) + require.NoError(t, err, "failed to sign vote") + + // TODO: remove hardcoded vote extension. + // currently set for abci/examples/kvstore/persistent_kvstore.go + v.VoteExtension = types.VoteExtensionFromProto(kvstore.ConstructVoteExtension(v.ValidatorAddress)) vs.lastVote = v @@ -155,14 +166,16 @@ func signVote( } func signVotes( - cfg *config.Config, + ctx context.Context, + t *testing.T, voteType tmproto.SignedMsgType, - hash []byte, - header types.PartSetHeader, - vss ...*validatorStub) []*types.Vote { + chainID string, + blockID types.BlockID, + vss ...*validatorStub, +) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { - votes[i] = signVote(vs, cfg, voteType, hash, header) + votes[i] = signVote(ctx, t, vs, voteType, chainID, blockID) } return votes } @@ -185,61 +198,61 @@ func (vss ValidatorStubsByPower) Len() int { return len(vss) } -func (vss ValidatorStubsByPower) Less(i, j int) bool { - vssi, err := vss[i].GetPubKey(context.Background()) - if err != nil { - panic(err) - } - vssj, err := vss[j].GetPubKey(context.Background()) - if err != nil { - panic(err) - } +func sortVValidatorStubsByPower(ctx context.Context, t *testing.T, vss []*validatorStub) []*validatorStub { + t.Helper() + sort.Slice(vss, func(i, j int) bool { + vssi, err := vss[i].GetPubKey(ctx) + require.NoError(t, err) + + vssj, err := vss[j].GetPubKey(ctx) + require.NoError(t, err) + + if vss[i].VotingPower == vss[j].VotingPower { + return bytes.Compare(vssi.Address(), vssj.Address()) == -1 + } + return vss[i].VotingPower > vss[j].VotingPower + }) - if vss[i].VotingPower == vss[j].VotingPower { - return bytes.Compare(vssi.Address(), vssj.Address()) == -1 + for idx, vs := range vss { + vs.Index = int32(idx) } - return vss[i].VotingPower > vss[j].VotingPower -} -func (vss ValidatorStubsByPower) Swap(i, j int) { - it := vss[i] - vss[i] = vss[j] - vss[i].Index = int32(i) - vss[j] = it - vss[j].Index = int32(j) + return vss } 
//------------------------------------------------------------------------------- // Functions for transitioning the consensus state -func startTestRound(cs *State, height int64, round int32) { - cs.enterNewRound(height, round) - cs.startRoutines(0) +func startTestRound(ctx context.Context, cs *State, height int64, round int32) { + cs.enterNewRound(ctx, height, round) + cs.startRoutines(ctx, 0) } // Create proposal block from cs1 but sign it with vs. func decideProposal( + ctx context.Context, + t *testing.T, cs1 *State, vs *validatorStub, height int64, round int32, ) (proposal *types.Proposal, block *types.Block) { + t.Helper() + cs1.mtx.Lock() - block, blockParts := cs1.createProposalBlock() + block, blockParts, err := cs1.createProposalBlock() + require.NoError(t, err) validRound := cs1.ValidRound chainID := cs1.state.ChainID cs1.mtx.Unlock() - if block == nil { - panic("Failed to createProposalBlock. Did you forget to add commit for previous block?") - } + + require.NotNil(t, block, "Failed to createProposalBlock. Did you forget to add commit for previous block?") // Make proposal polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal = types.NewProposal(height, round, polRound, propBlockID) + proposal = types.NewProposal(height, round, polRound, propBlockID, block.Header.Time) p := proposal.ToProto() - if err := vs.SignProposal(context.Background(), chainID, p); err != nil { - panic(err) - } + require.NoError(t, vs.SignProposal(ctx, chainID, p)) proposal.Signature = p.Signature @@ -253,52 +266,63 @@ func addVotes(to *State, votes ...*types.Vote) { } func signAddVotes( - cfg *config.Config, + ctx context.Context, + t *testing.T, to *State, voteType tmproto.SignedMsgType, - hash []byte, - header types.PartSetHeader, + chainID string, + blockID types.BlockID, vss ...*validatorStub, ) { - votes := signVotes(cfg, voteType, hash, header, vss...) - addVotes(to, votes...) 
+ addVotes(to, signVotes(ctx, t, voteType, chainID, blockID, vss...)...) } -func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { +func validatePrevote( + ctx context.Context, + t *testing.T, + cs *State, + round int32, + privVal *validatorStub, + blockHash []byte, +) { + t.Helper() + + cs.mtx.RLock() + defer cs.mtx.RUnlock() + prevotes := cs.Votes.Prevotes(round) - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) + address := pubKey.Address() - var vote *types.Vote - if vote = prevotes.GetByAddress(address); vote == nil { - panic("Failed to find prevote from validator") - } + + vote := prevotes.GetByAddress(address) + require.NotNil(t, vote, "Failed to find prevote from validator") + if blockHash == nil { - if vote.BlockID.Hash != nil { - panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash)) - } + require.Nil(t, vote.BlockID.Hash, "Expected prevote to be for nil, got %X", vote.BlockID.Hash) } else { - if !bytes.Equal(vote.BlockID.Hash, blockHash) { - panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash)) - } + require.True(t, bytes.Equal(vote.BlockID.Hash, blockHash), "Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash) } } -func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { +func validateLastPrecommit(ctx context.Context, t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { + t.Helper() + votes := cs.LastCommit - pv, err := privVal.GetPubKey(context.Background()) + pv, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() - var vote *types.Vote - if vote = votes.GetByAddress(address); vote == nil { - panic("Failed to find precommit from validator") - } - if !bytes.Equal(vote.BlockID.Hash, blockHash) { - panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, 
vote.BlockID.Hash)) - } + + vote := votes.GetByAddress(address) + require.NotNil(t, vote) + + require.True(t, bytes.Equal(vote.BlockID.Hash, blockHash), + "Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash) } func validatePrecommit( + ctx context.Context, t *testing.T, cs *State, thisRound, @@ -307,70 +331,74 @@ func validatePrecommit( votedBlockHash, lockedBlockHash []byte, ) { + t.Helper() + precommits := cs.Votes.Precommits(thisRound) - pv, err := privVal.GetPubKey(context.Background()) + pv, err := privVal.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() - var vote *types.Vote - if vote = precommits.GetByAddress(address); vote == nil { - panic("Failed to find precommit from validator") - } + + vote := precommits.GetByAddress(address) + require.NotNil(t, vote, "Failed to find precommit from validator") if votedBlockHash == nil { - if vote.BlockID.Hash != nil { - panic("Expected precommit to be for nil") - } + require.Nil(t, vote.BlockID.Hash, "Expected precommit to be for nil") } else { - if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) { - panic("Expected precommit to be for proposal block") - } + require.True(t, bytes.Equal(vote.BlockID.Hash, votedBlockHash), "Expected precommit to be for proposal block") } if lockedBlockHash == nil { - if cs.LockedRound != lockRound || cs.LockedBlock != nil { - panic(fmt.Sprintf( - "Expected to be locked on nil at round %d. Got locked at round %d with block %v", - lockRound, - cs.LockedRound, - cs.LockedBlock)) - } + require.False(t, cs.LockedRound != lockRound || cs.LockedBlock != nil, + "Expected to be locked on nil at round %d. Got locked at round %d with block %v", + lockRound, + cs.LockedRound, + cs.LockedBlock) } else { - if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) { - panic(fmt.Sprintf( - "Expected block to be locked on round %d, got %d. 
Got locked block %X, expected %X", - lockRound, - cs.LockedRound, - cs.LockedBlock.Hash(), - lockedBlockHash)) - } + require.False(t, cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash), + "Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", + lockRound, + cs.LockedRound, + cs.LockedBlock.Hash(), + lockedBlockHash) } } -func validatePrevoteAndPrecommit( - t *testing.T, - cs *State, - thisRound, - lockRound int32, - privVal *validatorStub, - votedBlockHash, - lockedBlockHash []byte, -) { - // verify the prevote - validatePrevote(t, cs, thisRound, privVal, votedBlockHash) - // verify precommit - cs.mtx.Lock() - validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) - cs.mtx.Unlock() +func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message { + t.Helper() + + ch := make(chan tmpubsub.Message, 1) + if err := cs.eventBus.Observe(ctx, func(msg tmpubsub.Message) error { + vote := msg.Data().(types.EventDataVote) + // we only fire for our own votes + if bytes.Equal(addr, vote.Vote.ValidatorAddress) { + ch <- msg + } + return nil + }, types.EventQueryVote); err != nil { + t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err) + } + return ch } -func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { - votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote) +func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, addr []byte) <-chan tmpubsub.Message { + t.Helper() + votesSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryVote, + Limit: 10}) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) + t.Fatalf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote) } - ch := make(chan 
tmpubsub.Message) + ch := make(chan tmpubsub.Message, 10) go func() { - for msg := range votesSub.Out() { + for { + msg, err := votesSub.Next(ctx) + if err != nil { + if !errors.Is(err, tmpubsub.ErrTerminated) && !errors.Is(err, context.Canceled) { + t.Errorf("error terminating pubsub %s", err) + } + return + } vote := msg.Data().(types.EventDataVote) // we only fire for our own votes if bytes.Equal(addr, vote.Vote.ValidatorAddress) { @@ -384,36 +412,61 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { //------------------------------------------------------------------------------- // consensus states -func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { - cfg := config.ResetTestRoot("consensus_state_test") - return newStateWithConfig(cfg, state, pv, app) +func newState( + ctx context.Context, + t *testing.T, + logger log.Logger, + state sm.State, + pv types.PrivValidator, + app abci.Application, +) *State { + t.Helper() + + cfg, err := config.ResetTestRoot("consensus_state_test") + require.NoError(t, err) + + return newStateWithConfig(ctx, t, logger, cfg, state, pv, app) } func newStateWithConfig( + ctx context.Context, + t *testing.T, + logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, ) *State { - blockStore := store.NewBlockStore(dbm.NewMemDB()) - return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore) + t.Helper() + return newStateWithConfigAndBlockStore(ctx, t, logger, thisConfig, state, pv, app, store.NewBlockStore(dbm.NewMemDB())) } func newStateWithConfigAndBlockStore( + ctx context.Context, + t *testing.T, + logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockStore *store.BlockStore, ) *State { + t.Helper() + // one for mempool, one for consensus - mtx := new(tmsync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := 
abciclient.NewLocalClient(mtx, app) + mtx := new(sync.Mutex) + proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + + mempool := mempool.NewTxMempool( + logger.With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) + if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -423,44 +476,48 @@ func newStateWithConfigAndBlockStore( // Make State stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(state); err != nil { // for save height 1's validators info - panic(err) - } + require.NoError(t, stateStore.Save(state)) + + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore) + cs := NewState(ctx, + logger.With("module", "consensus"), + thisConfig.Consensus, + state, + blockExec, + blockStore, + mempool, + evpool, + ) + cs.SetPrivValidator(ctx, pv) + + eventBus := eventbus.NewDefault(logger.With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) - cs.SetPrivValidator(pv) - - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err := eventBus.Start() - if err != nil { - panic(err) - } cs.SetEventBus(eventBus) return cs } -func loadPrivValidator(cfg *config.Config) *privval.FilePV { +func loadPrivValidator(t *testing.T, cfg *config.Config) *privval.FilePV { + t.Helper() privValidatorKeyFile := cfg.PrivValidator.KeyFile() - ensureDir(filepath.Dir(privValidatorKeyFile), 0700) + ensureDir(t, 
filepath.Dir(privValidatorKeyFile), 0700) privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) - if err != nil { - panic(err) - } - privValidator.Reset() + require.NoError(t, err) + require.NoError(t, privValidator.Reset()) return privValidator } -func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) { +func makeState(ctx context.Context, t *testing.T, cfg *config.Config, logger log.Logger, nValidators int) (*State, []*validatorStub) { + t.Helper() // Get State - state, privVals := randGenesisState(cfg, nValidators, false, 10) + state, privVals := makeGenesisState(ctx, t, cfg, genesisStateArgs{ + Validators: nValidators, + }) vss := make([]*validatorStub, nValidators) - cs := newState(state, privVals[0], kvstore.NewApplication()) + cs := newState(ctx, t, logger, state, privVals[0], kvstore.NewApplication()) for i := 0; i < nValidators; i++ { vss[i] = newValidatorStub(privVals[i], int32(i)) @@ -473,222 +530,209 @@ func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) { //------------------------------------------------------------------------------- -func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration, +func ensureNoMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, timeout time.Duration, errorMessage string) { + t.Helper() select { case <-time.After(timeout): break case <-ch: - panic(errorMessage) + t.Fatal(errorMessage) } } -func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) { - ensureNoNewEvent( +func ensureNoNewEventOnChannel(t *testing.T, ch <-chan tmpubsub.Message) { + t.Helper() + ensureNoMessageBeforeTimeout( + t, ch, ensureTimeout, "We should be stuck waiting, not receiving new event on the channel") } -func ensureNoNewRoundStep(stepCh <-chan tmpubsub.Message) { - ensureNoNewEvent( +func ensureNoNewRoundStep(t *testing.T, stepCh <-chan tmpubsub.Message) { + t.Helper() + 
ensureNoMessageBeforeTimeout( + t, stepCh, ensureTimeout, "We should be stuck waiting, not receiving NewRoundStep event") } -func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) { - ensureNoNewEvent( - unlockCh, - ensureTimeout, - "We should be stuck waiting, not receiving Unlock event") -} - -func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) { +func ensureNoNewTimeout(t *testing.T, stepCh <-chan tmpubsub.Message, timeout int64) { + t.Helper() timeoutDuration := time.Duration(timeout*10) * time.Nanosecond - ensureNoNewEvent( + ensureNoMessageBeforeTimeout( + t, stepCh, timeoutDuration, "We should be stuck waiting, not receiving NewTimeout event") } -func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration, errorMessage string) { - select { - case <-time.After(timeout): - panic(errorMessage) - case msg := <-ch: - roundStateEvent, ok := msg.Data().(types.EventDataRoundState) - if !ok { - panic(fmt.Sprintf("expected a EventDataRoundState, got %T. Wrong subscription channel?", - msg.Data())) - } - if roundStateEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, roundStateEvent.Height)) - } - if roundStateEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, roundStateEvent.Round)) - } - // TODO: We could check also for a step at this point! - } +func ensureNewEvent(t *testing.T, ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, ch, ensureTimeout) + roundStateEvent, ok := msg.Data().(types.EventDataRoundState) + require.True(t, ok, + "expected a EventDataRoundState, got %T. Wrong subscription channel?", + msg.Data()) + + require.Equal(t, height, roundStateEvent.Height) + require.Equal(t, round, roundStateEvent.Round) + // TODO: We could check also for a step at this point! 
} -func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int32) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewRound event") - case msg := <-roundCh: - newRoundEvent, ok := msg.Data().(types.EventDataNewRound) - if !ok { - panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?", - msg.Data())) - } - if newRoundEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, newRoundEvent.Height)) - } - if newRoundEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, newRoundEvent.Round)) - } - } +func ensureNewRound(t *testing.T, roundCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, roundCh, ensureTimeout) + newRoundEvent, ok := msg.Data().(types.EventDataNewRound) + require.True(t, ok, "expected a EventDataNewRound, got %T. Wrong subscription channel?", + msg.Data()) + + require.Equal(t, height, newRoundEvent.Height) + require.Equal(t, round, newRoundEvent.Round) } -func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) { +func ensureNewTimeout(t *testing.T, timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) { + t.Helper() timeoutDuration := time.Duration(timeout*10) * time.Nanosecond - ensureNewEvent(timeoutCh, height, round, timeoutDuration, - "Timeout expired while waiting for NewTimeout event") + ensureNewEvent(t, timeoutCh, height, round, timeoutDuration) } -func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewProposal event") - case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) - if !ok { - panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. 
Wrong subscription channel?", - msg.Data())) - } - if proposalEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height)) - } - if proposalEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) - } - } +func ensureNewProposal(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32) types.BlockID { + t.Helper() + msg := ensureMessageBeforeTimeout(t, proposalCh, ensureTimeout) + proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) + require.True(t, ok, "expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", + msg.Data()) + require.Equal(t, height, proposalEvent.Height) + require.Equal(t, round, proposalEvent.Round) + return proposalEvent.BlockID } -func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int32) { - ensureNewEvent(validBlockCh, height, round, ensureTimeout, - "Timeout expired while waiting for NewValidBlock event") +func ensureNewValidBlock(t *testing.T, validBlockCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureNewEvent(t, validBlockCh, height, round, ensureTimeout) } -func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewBlock event") - case msg := <-blockCh: - blockEvent, ok := msg.Data().(types.EventDataNewBlock) - if !ok { - panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?", - msg.Data())) - } - if blockEvent.Block.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, blockEvent.Block.Height)) - } - } +func ensureNewBlock(t *testing.T, blockCh <-chan tmpubsub.Message, height int64) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, blockCh, ensureTimeout) + blockEvent, ok := msg.Data().(types.EventDataNewBlock) + require.True(t, ok, "expected a EventDataNewBlock, got %T. 
Wrong subscription channel?", + msg.Data()) + require.Equal(t, height, blockEvent.Block.Height) } -func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHash tmbytes.HexBytes) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewBlockHeader event") - case msg := <-blockCh: - blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader) - if !ok { - panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?", - msg.Data())) - } - if blockHeaderEvent.Header.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, blockHeaderEvent.Header.Height)) - } - if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) { - panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash())) - } - } +func ensureNewBlockHeader(t *testing.T, blockCh <-chan tmpubsub.Message, height int64, blockHash tmbytes.HexBytes) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, blockCh, ensureTimeout) + blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader) + require.True(t, ok, "expected a EventDataNewBlockHeader, got %T. 
Wrong subscription channel?", + msg.Data()) + + require.Equal(t, height, blockHeaderEvent.Header.Height) + require.True(t, bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash)) } -func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int32) { - ensureNewEvent(unlockCh, height, round, ensureTimeout, - "Timeout expired while waiting for NewUnlock event") +func ensureLock(t *testing.T, lockCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureNewEvent(t, lockCh, height, round, ensureTimeout) } -func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewProposal event") - case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) - if !ok { - panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", - msg.Data())) - } - if proposalEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height)) - } - if proposalEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) - } - if !proposalEvent.BlockID.Equals(propID) { - panic(fmt.Sprintf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID)) - } +func ensureRelock(t *testing.T, relockCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureNewEvent(t, relockCh, height, round, ensureTimeout) +} + +func ensureProposal(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) { + ensureProposalWithTimeout(t, proposalCh, height, round, &propID, ensureTimeout) +} + +func ensureProposalWithTimeout(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32, propID *types.BlockID, timeout time.Duration) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, proposalCh, timeout) + 
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) + require.True(t, ok, "expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", + msg.Data()) + require.Equal(t, height, proposalEvent.Height) + require.Equal(t, round, proposalEvent.Round) + if propID != nil { + require.True(t, proposalEvent.BlockID.Equals(*propID), + "Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID) } } -func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int32) { - ensureVote(voteCh, height, round, tmproto.PrecommitType) +func ensurePrecommit(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureVote(t, voteCh, height, round, tmproto.PrecommitType) } -func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) { - ensureVote(voteCh, height, round, tmproto.PrevoteType) +func ensurePrevote(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureVote(t, voteCh, height, round, tmproto.PrevoteType) } -func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32, - voteType tmproto.SignedMsgType) { +func ensurePrevoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) { + t.Helper() + ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrevoteType) +} + +func ensurePrecommitMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) { + t.Helper() + ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrecommitType) +} + +func ensureVoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte, voteType tmproto.SignedMsgType) { + t.Helper() select { case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewVote event") + t.Fatal("Timeout expired while waiting for NewVote event") case msg := <-voteCh: voteEvent, ok := msg.Data().(types.EventDataVote) - if !ok { - 
panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?", - msg.Data())) - } + require.True(t, ok, "expected a EventDataVote, got %T. Wrong subscription channel?", + msg.Data()) + vote := voteEvent.Vote - if vote.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) - } - if vote.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) - } - if vote.Type != voteType { - panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) + require.Equal(t, height, vote.Height) + require.Equal(t, round, vote.Round) + + require.Equal(t, voteType, vote.Type) + if hash == nil { + require.Nil(t, vote.BlockID.Hash, "Expected prevote to be for nil, got %X", vote.BlockID.Hash) + } else { + require.True(t, bytes.Equal(vote.BlockID.Hash, hash), "Expected prevote to be for %X, got %X", hash, vote.BlockID.Hash) } } } +func ensureVote(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, voteType tmproto.SignedMsgType) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, voteCh, ensureTimeout) + voteEvent, ok := msg.Data().(types.EventDataVote) + require.True(t, ok, "expected a EventDataVote, got %T. 
Wrong subscription channel?", + msg.Data()) -func ensurePrecommitTimeout(ch <-chan tmpubsub.Message) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for the Precommit to Timeout") - case <-ch: - } + vote := voteEvent.Vote + require.Equal(t, height, vote.Height) + require.Equal(t, round, vote.Round) + + require.Equal(t, voteType, vote.Type) } -func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) { +func ensureNewEventOnChannel(t *testing.T, ch <-chan tmpubsub.Message) { + t.Helper() + ensureMessageBeforeTimeout(t, ch, ensureTimeout) +} + +func ensureMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, to time.Duration) tmpubsub.Message { + t.Helper() select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for new activity on the channel") - case <-ch: + case <-time.After(to): + t.Fatalf("Timeout expired while waiting for message") + case msg := <-ch: + return msg } + panic("unreachable") } //------------------------------------------------------------------------------- @@ -700,17 +744,20 @@ func consensusLogger() log.Logger { return log.TestingLogger().With("module", "consensus") } -func randConsensusState( +func makeConsensusState( + ctx context.Context, t *testing.T, cfg *config.Config, nValidators int, testName string, tickerFunc func() TimeoutTicker, - appFunc func() abci.Application, + appFunc func(t *testing.T, logger log.Logger) abci.Application, configOpts ...func(*config.Config), ) ([]*State, cleanupFunc) { + t.Helper() - genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) css := make([]*State, nValidators) logger := consensusLogger() @@ -722,16 +769,18 @@ func randConsensusState( blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db state, err := sm.MakeGenesisState(genDoc) 
require.NoError(t, err) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, err) + configRootDirs = append(configRootDirs, thisConfig.RootDir) for _, opt := range configOpts { opt(thisConfig) } - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() + app := appFunc(t, logger) if appCloser, ok := app.(io.Closer); ok { closeFuncs = append(closeFuncs, appCloser.Close) @@ -740,9 +789,9 @@ func randConsensusState( vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) - css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore) + l := logger.With("validator", i, "module", "consensus") + css[i] = newStateWithConfigAndBlockStore(ctx, t, l, thisConfig, state, privVals[i], app, blockStore) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, func() { @@ -757,24 +806,32 @@ func randConsensusState( // nPeers = nValidators + nNotValidator func randConsensusNetWithPeers( + ctx context.Context, + t *testing.T, cfg *config.Config, - nValidators, + nValidators int, nPeers int, testName string, tickerFunc func() TimeoutTicker, - appFunc func(string) abci.Application, + appFunc func(log.Logger, string) abci.Application, ) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) { - genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, testMinPower) + t.Helper() + + valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, testMinPower) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) css := make([]*State, nPeers) + t.Helper() logger := consensusLogger() var peer0Config *config.Config configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { state, _ 
:= sm.MakeGenesisState(genDoc) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, err) + configRootDirs = append(configRootDirs, thisConfig.RootDir) - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal if i == 0 { peer0Config = thisConfig } @@ -782,22 +839,17 @@ func randConsensusNetWithPeers( if i < nValidators { privVal = privVals[i] } else { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - if err != nil { - panic(err) - } - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - if err != nil { - panic(err) - } + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + require.NoError(t, err) privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") - if err != nil { - panic(err) - } + require.NoError(t, err) } - app := appFunc(path.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i))) + app := appFunc(logger, filepath.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i))) vals := types.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok { // simulate handshake, receive app version. 
If don't do this, replay test will fail @@ -806,9 +858,8 @@ func randConsensusNetWithPeers( app.InitChain(abci.RequestInitChain{Validators: vals}) // sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above - css[i] = newStateWithConfig(thisConfig, state, privVal, app) + css[i] = newStateWithConfig(ctx, t, logger.With("validator", i, "module", "consensus"), thisConfig, state, privVal, app) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, genDoc, peer0Config, func() { for _, dir := range configRootDirs { @@ -817,21 +868,38 @@ func randConsensusNetWithPeers( } } -func randGenesisState( - cfg *config.Config, - numValidators int, - randPower bool, - minPower int64) (sm.State, []types.PrivValidator) { +type genesisStateArgs struct { + Validators int + Power int64 + Params *types.ConsensusParams + Time time.Time +} - genDoc, privValidators := factory.RandGenesisDoc(cfg, numValidators, randPower, minPower) - s0, _ := sm.MakeGenesisState(genDoc) +func makeGenesisState(ctx context.Context, t *testing.T, cfg *config.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) { + t.Helper() + if args.Power == 0 { + args.Power = 1 + } + if args.Validators == 0 { + args.Power = 4 + } + valSet, privValidators := factory.ValidatorSet(ctx, t, args.Validators, args.Power) + if args.Params == nil { + args.Params = types.DefaultConsensusParams() + } + if args.Time.IsZero() { + args.Time = time.Now() + } + genDoc := factory.GenesisDoc(cfg, args.Time, valSet.Validators, args.Params) + s0, err := sm.MakeGenesisState(genDoc) + require.NoError(t, err) return s0, privValidators } func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker { return func() TimeoutTicker { return &mockTicker{ - c: make(chan timeoutInfo, 10), + c: make(chan timeoutInfo, 100), onlyOnce: onlyOnce, } } @@ -847,7 +915,7 @@ type mockTicker struct { fired bool } -func (m *mockTicker) 
Start() error { +func (m *mockTicker) Start(context.Context) error { return nil } @@ -855,6 +923,8 @@ func (m *mockTicker) Stop() error { return nil } +func (m *mockTicker) IsRunning() bool { return false } + func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) { m.mtx.Lock() defer m.mtx.Unlock() @@ -873,20 +943,21 @@ func (m *mockTicker) Chan() <-chan timeoutInfo { func (*mockTicker) SetLogger(log.Logger) {} -func newPersistentKVStore() abci.Application { - dir, err := ioutil.TempDir("", "persistent-kvstore") - if err != nil { - panic(err) - } - return kvstore.NewPersistentKVStoreApplication(dir) +func newPersistentKVStore(t *testing.T, logger log.Logger) abci.Application { + t.Helper() + + dir, err := os.MkdirTemp("", "persistent-kvstore") + require.NoError(t, err) + + return kvstore.NewPersistentKVStoreApplication(logger, dir) } -func newKVStore() abci.Application { +func newKVStore(_ *testing.T, _ log.Logger) abci.Application { return kvstore.NewApplication() } -func newPersistentKVStoreWithPath(dbDir string) abci.Application { - return kvstore.NewPersistentKVStoreApplication(dbDir) +func newPersistentKVStoreWithPath(logger log.Logger, dbDir string) abci.Application { + return kvstore.NewPersistentKVStoreApplication(logger, dbDir) } func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool { diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index 6f858ee112..541b1bbc45 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -5,35 +5,40 @@ import ( "sync" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" + tmtime "github.com/tendermint/tendermint/libs/time" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) func TestReactorInvalidPrecommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) n := 4 - states, cleanup := randConsensusState(t, + states, cleanup := makeConsensusState(ctx, t, config, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) for i := 0; i < 4; i++ { - ticker := NewTimeoutTicker() - ticker.SetLogger(states[i].Logger) + ticker := NewTimeoutTicker(states[i].logger) states[i].SetTimeoutTicker(ticker) } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // this val sends a random precommit at each height @@ -46,8 +51,8 @@ func TestReactorInvalidPrecommit(t *testing.T) { // block and otherwise disable the priv validator. byzState.mtx.Lock() privVal := byzState.privValidator - byzState.doPrevote = func(height int64, round int32) { - invalidDoPrevoteFunc(t, height, round, byzState, byzReactor, privVal) + byzState.doPrevote = func(ctx context.Context, height int64, round int32) { + invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, privVal) } byzState.mtx.Unlock() @@ -55,13 +60,17 @@ func TestReactorInvalidPrecommit(t *testing.T) { // // TODO: Make this tighter by ensuring the halt happens by block 2. 
var wg sync.WaitGroup + for i := 0; i < 10; i++ { for _, sub := range rts.subs { wg.Add(1) - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() // cancel other subscribers on failure + } }(sub) } } @@ -69,7 +78,15 @@ func TestReactorInvalidPrecommit(t *testing.T) { wg.Wait() } -func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv types.PrivValidator) { +func invalidDoPrevoteFunc( + ctx context.Context, + t *testing.T, + height int64, + round int32, + cs *State, + r *Reactor, + pv types.PrivValidator, +) { // routine to: // - precommit for a random block // - send precommit to all peers @@ -78,7 +95,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r cs.mtx.Lock() cs.privValidator = pv - pubKey, err := cs.privValidator.GetPubKey(context.Background()) + pubKey, err := cs.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() @@ -91,7 +108,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r ValidatorIndex: valIndex, Height: cs.Height, Round: cs.Round, - Timestamp: cs.voteTime(), + Timestamp: tmtime.Now(), Type: tmproto.PrecommitType, BlockID: types.BlockID{ Hash: blockHash, @@ -99,7 +116,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r } p := precommit.ToProto() - err = cs.privValidator.SignVote(context.Background(), cs.state.ChainID, p) + err = cs.privValidator.SignVote(ctx, cs.state.ChainID, p) require.NoError(t, err) precommit.Signature = p.Signature @@ -107,14 +124,12 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r cs.mtx.Unlock() for _, ps := range r.peers { - cs.Logger.Info("sending bad vote", "block", blockHash, "peer", ps.peerID) - - r.voteCh.Out <- p2p.Envelope{ + require.NoError(t, r.voteCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: 
&tmcons.Vote{ Vote: precommit.ToProto(), }, - } + })) } }() } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index c1bc907c96..0cbee4fcd0 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -17,123 +17,151 @@ import ( "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) // for testing -func assertMempool(txn txNotifier) mempool.Mempool { - return txn.(mempool.Mempool) +func assertMempool(t *testing.T, txn txNotifier) mempool.Mempool { + t.Helper() + mp, ok := txn.(mempool.Mempool) + require.True(t, ok) + return mp } func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + baseConfig := configSetup(t) - config := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig("consensus_mempool_txs_available_test") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) - assertMempool(cs.txNotifier).EnableTxsAvailable() + state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{ + Validators: 1, + Power: 10}) + cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) + assertMempool(t, cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) - - ensureNewEventOnChannel(newBlockCh) // first block gets committed - ensureNoNewEventOnChannel(newBlockCh) - deliverTxsRange(cs, 0, 1) - ensureNewEventOnChannel(newBlockCh) // 
commit txs - ensureNewEventOnChannel(newBlockCh) // commit updated app hash - ensureNoNewEventOnChannel(newBlockCh) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + startTestRound(ctx, cs, height, round) + + ensureNewEventOnChannel(t, newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(t, newBlockCh) + deliverTxsRange(ctx, t, cs, 0, 1) + ensureNewEventOnChannel(t, newBlockCh) // commit txs + ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash + ensureNoNewEventOnChannel(t, newBlockCh) } func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { baseConfig := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig("consensus_mempool_txs_available_test") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocksInterval = ensureTimeout - state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{ + Validators: 1, + Power: 10}) + cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) - assertMempool(cs.txNotifier).EnableTxsAvailable() + assertMempool(t, cs.txNotifier).EnableTxsAvailable() - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, cs.Height, cs.Round) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + startTestRound(ctx, cs, cs.Height, cs.Round) - ensureNewEventOnChannel(newBlockCh) // first block gets committed - ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ... 
- ensureNewEventOnChannel(newBlockCh) // until the CreateEmptyBlocksInterval has passed + ensureNewEventOnChannel(t, newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(t, newBlockCh) // then we dont make a block ... + ensureNewEventOnChannel(t, newBlockCh) // until the CreateEmptyBlocksInterval has passed } func TestMempoolProgressInHigherRound(t *testing.T) { baseConfig := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig("consensus_mempool_txs_available_test") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(baseConfig, 1, false, 10) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) - assertMempool(cs.txNotifier).EnableTxsAvailable() + state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{ + Validators: 1, + Power: 10}) + cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication()) + assertMempool(t, cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - cs.setProposal = func(proposal *types.Proposal) error { + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) + cs.setProposal = func(proposal *types.Proposal, recvTime time.Time) error { if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and // go to next round - cs.Logger.Info("Ignoring set proposal at height 2, round 0") return nil } - 
return cs.defaultSetProposal(proposal) + return cs.defaultSetProposal(proposal, recvTime) } - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) - ensureNewRound(newRoundCh, height, round) // first round at first height - ensureNewEventOnChannel(newBlockCh) // first block gets committed + ensureNewRound(t, newRoundCh, height, round) // first round at first height + ensureNewEventOnChannel(t, newBlockCh) // first block gets committed height++ // moving to the next height round = 0 - ensureNewRound(newRoundCh, height, round) // first round at next height - deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round - ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + ensureNewRound(t, newRoundCh, height, round) // first round at next height + deliverTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round + ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) - round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) // wait for the next round - ensureNewEventOnChannel(newBlockCh) // now we can commit the block + round++ // moving to the next round + ensureNewRound(t, newRoundCh, height, round) // wait for the next round + ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block } -func deliverTxsRange(cs *State, start, end int) { +func deliverTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) { + t.Helper() // Deliver some txs. 
for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - if err != nil { - panic(fmt.Sprintf("Error after CheckTx: %v", err)) - } + err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{}) + require.NoError(t, err, "error after checkTx") } } func TestMempoolTxConcurrentWithCommit(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - state, privVals := randGenesisState(config, 1, false, 10) + config := configSetup(t) + logger := log.TestingLogger() + state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{ + Validators: 1, + Power: 10}) stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore) + + cs := newStateWithConfigAndBlockStore( + ctx, + t, + logger, config, state, privVals[0], NewCounterApplication(), blockStore) + err := stateStore.Save(state) require.NoError(t, err) - newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) + newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 - go deliverTxsRange(cs, 0, int(numTxs)) + go deliverTxsRange(ctx, t, cs, 0, int(numTxs)) - startTestRound(cs, cs.Height, cs.Round) + startTestRound(ctx, cs, cs.Height, cs.Round) for n := int64(0); n < numTxs; { select { case msg := <-newBlockHeaderCh: @@ -147,12 +175,16 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { func TestMempoolRmBadTx(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - state, privVals := randGenesisState(config, 1, false, 10) + state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{ + Validators: 1, + Power: 
10}) app := NewCounterApplication() stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore) + cs := newStateWithConfigAndBlockStore(ctx, t, log.TestingLogger(), config, state, privVals[0], app, blockStore) err := stateStore.Save(state) require.NoError(t, err) @@ -172,7 +204,7 @@ func TestMempoolRmBadTx(t *testing.T) { // Try to send the tx through the mempool. // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, func(r *abci.Response) { + err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.Response) { if r.GetCheckTx().Code != code.CodeTypeBadNonce { t.Errorf("expected checktx to return bad nonce, got %v", r) return @@ -180,13 +212,13 @@ func TestMempoolRmBadTx(t *testing.T) { checkTxRespCh <- struct{}{} }, mempool.TxInfo{}) if err != nil { - t.Errorf("error after CheckTx: %v", err) + t.Errorf("error after CheckTx: %w", err) return } // check for the tx for { - txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1) + txs := assertMempool(t, cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1) if len(txs) == 0 { emptyMempoolCh <- struct{}{} return @@ -270,7 +302,7 @@ func (app *CounterApplication) Commit() abci.ResponseCommit { return abci.ResponseCommit{Data: hash} } -func (app *CounterApplication) PreprocessTxs( - req abci.RequestPreprocessTxs) abci.ResponsePreprocessTxs { - return abci.ResponsePreprocessTxs{Txs: req.Txs} +func (app *CounterApplication) PrepareProposal( + req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { + return abci.ResponsePrepareProposal{BlockData: req.BlockData} } diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index a75f1505c7..b10cebdb56 100644 --- a/internal/consensus/metrics.go +++ 
b/internal/consensus/metrics.go @@ -64,6 +64,27 @@ type Metrics struct { // Histogram of time taken per step annotated with reason that the step proceeded. StepTime metrics.Histogram + + // QuroumPrevoteMessageDelay is the interval in seconds between the proposal + // timestamp and the timestamp of the earliest prevote that achieved a quorum + // during the prevote step. + // + // To compute it, sum the voting power over each prevote received, in increasing + // order of timestamp. The timestamp of the first prevote to increase the sum to + // be above 2/3 of the total voting power of the network defines the endpoint + // the endpoint of the interval. Subtract the proposal timestamp from this endpoint + // to obtain the quorum delay. + QuorumPrevoteMessageDelay metrics.Gauge + + // FullPrevoteMessageDelay is the interval in seconds between the proposal + // timestamp and the timestamp of the latest prevote in a round where 100% + // of the voting power on the network issued prevotes. + FullPrevoteMessageDelay metrics.Gauge + + // ProposalTimestampDifference is the difference between the timestamp in + // the proposal message and the local time of the validator at the time + // that the validator received the message. + ProposalTimestampDifference metrics.Histogram } // PrometheusMetrics returns Metrics build using Prometheus client library. 
@@ -196,6 +217,29 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "step_time", Help: "Time spent per step.", }, append(labels, "step", "reason")).With(labelsAndValues...), + QuorumPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "quorum_prevote_message_delay", + Help: "Difference in seconds between the proposal timestamp and the timestamp " + + "of the latest prevote that achieved a quorum in the prevote step.", + }, labels).With(labelsAndValues...), + FullPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "full_prevote_message_delay", + Help: "Difference in seconds between the proposal timestamp and the timestamp " + + "of the latest prevote that achieved 100% of the voting power in the prevote step.", + }, labels).With(labelsAndValues...), + ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_timestamp_difference", + Help: "Difference in seconds between the timestamp in the proposal " + + "message and the local time when the message was received. 
" + + "Only calculated when a new block is proposed.", + Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, + }, append(labels, "is_timely")).With(labelsAndValues...), } } @@ -219,13 +263,16 @@ func NopMetrics() *Metrics { BlockIntervalSeconds: discard.NewHistogram(), - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewHistogram(), - TotalTxs: discard.NewGauge(), - CommittedHeight: discard.NewGauge(), - BlockSyncing: discard.NewGauge(), - StateSyncing: discard.NewGauge(), - BlockParts: discard.NewCounter(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewHistogram(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + BlockSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), + QuorumPrevoteMessageDelay: discard.NewGauge(), + FullPrevoteMessageDelay: discard.NewGauge(), + ProposalTimestampDifference: discard.NewHistogram(), } } diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go index 052b8f556b..8b19db423b 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -5,8 +5,8 @@ import ( "fmt" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/libs/bits" - tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -18,30 +18,34 @@ import ( // converted to a Message via MsgFromProto. 
type Message interface { ValidateBasic() error + + jsontypes.Tagged } func init() { - tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage") - tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage") - tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal") - tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL") - tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart") - tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote") - tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote") - tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23") - tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") + jsontypes.MustRegister(&NewRoundStepMessage{}) + jsontypes.MustRegister(&NewValidBlockMessage{}) + jsontypes.MustRegister(&ProposalMessage{}) + jsontypes.MustRegister(&ProposalPOLMessage{}) + jsontypes.MustRegister(&BlockPartMessage{}) + jsontypes.MustRegister(&VoteMessage{}) + jsontypes.MustRegister(&HasVoteMessage{}) + jsontypes.MustRegister(&VoteSetMaj23Message{}) + jsontypes.MustRegister(&VoteSetBitsMessage{}) } // NewRoundStepMessage is sent for every step taken in the ConsensusState. // For every height/round/step transition type NewRoundStepMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Step cstypes.RoundStepType - SecondsSinceStartTime int64 + SecondsSinceStartTime int64 `json:",string"` LastCommitRound int32 } +func (*NewRoundStepMessage) TypeTag() string { return "tendermint/NewRoundStepMessage" } + // ValidateBasic performs basic validation. func (m *NewRoundStepMessage) ValidateBasic() error { if m.Height < 0 { @@ -93,13 +97,15 @@ func (m *NewRoundStepMessage) String() string { // i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. // In case the block is also committed, then IsCommit flag is set to true. 
type NewValidBlockMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 BlockPartSetHeader types.PartSetHeader BlockParts *bits.BitArray IsCommit bool } +func (*NewValidBlockMessage) TypeTag() string { return "tendermint/NewValidBlockMessage" } + // ValidateBasic performs basic validation. func (m *NewValidBlockMessage) ValidateBasic() error { if m.Height < 0 { @@ -109,7 +115,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error { return errors.New("negative Round") } if err := m.BlockPartSetHeader.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockPartSetHeader: %v", err) + return fmt.Errorf("wrong BlockPartSetHeader: %w", err) } if m.BlockParts.Size() == 0 { return errors.New("empty blockParts") @@ -136,6 +142,8 @@ type ProposalMessage struct { Proposal *types.Proposal } +func (*ProposalMessage) TypeTag() string { return "tendermint/Proposal" } + // ValidateBasic performs basic validation. func (m *ProposalMessage) ValidateBasic() error { return m.Proposal.ValidateBasic() @@ -148,11 +156,13 @@ func (m *ProposalMessage) String() string { // ProposalPOLMessage is sent when a previous proposal is re-proposed. type ProposalPOLMessage struct { - Height int64 + Height int64 `json:",string"` ProposalPOLRound int32 ProposalPOL *bits.BitArray } +func (*ProposalPOLMessage) TypeTag() string { return "tendermint/ProposalPOL" } + // ValidateBasic performs basic validation. func (m *ProposalPOLMessage) ValidateBasic() error { if m.Height < 0 { @@ -177,11 +187,13 @@ func (m *ProposalPOLMessage) String() string { // BlockPartMessage is sent when gossipping a piece of the proposed block. type BlockPartMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Part *types.Part } +func (*BlockPartMessage) TypeTag() string { return "tendermint/BlockPart" } + // ValidateBasic performs basic validation. 
func (m *BlockPartMessage) ValidateBasic() error { if m.Height < 0 { @@ -191,7 +203,7 @@ func (m *BlockPartMessage) ValidateBasic() error { return errors.New("negative Round") } if err := m.Part.ValidateBasic(); err != nil { - return fmt.Errorf("wrong Part: %v", err) + return fmt.Errorf("wrong Part: %w", err) } return nil } @@ -206,6 +218,8 @@ type VoteMessage struct { Vote *types.Vote } +func (*VoteMessage) TypeTag() string { return "tendermint/Vote" } + // ValidateBasic performs basic validation. func (m *VoteMessage) ValidateBasic() error { return m.Vote.ValidateBasic() @@ -218,12 +232,14 @@ func (m *VoteMessage) String() string { // HasVoteMessage is sent to indicate that a particular vote has been received. type HasVoteMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Type tmproto.SignedMsgType Index int32 } +func (*HasVoteMessage) TypeTag() string { return "tendermint/HasVote" } + // ValidateBasic performs basic validation. func (m *HasVoteMessage) ValidateBasic() error { if m.Height < 0 { @@ -248,12 +264,14 @@ func (m *HasVoteMessage) String() string { // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. type VoteSetMaj23Message struct { - Height int64 + Height int64 `json:",string"` Round int32 Type tmproto.SignedMsgType BlockID types.BlockID } +func (*VoteSetMaj23Message) TypeTag() string { return "tendermint/VoteSetMaj23" } + // ValidateBasic performs basic validation. func (m *VoteSetMaj23Message) ValidateBasic() error { if m.Height < 0 { @@ -266,7 +284,7 @@ func (m *VoteSetMaj23Message) ValidateBasic() error { return errors.New("invalid Type") } if err := m.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } return nil @@ -280,13 +298,15 @@ func (m *VoteSetMaj23Message) String() string { // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the // BlockID. 
type VoteSetBitsMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Type tmproto.SignedMsgType BlockID types.BlockID Votes *bits.BitArray } +func (*VoteSetBitsMessage) TypeTag() string { return "tendermint/VoteSetBits" } + // ValidateBasic performs basic validation. func (m *VoteSetBitsMessage) ValidateBasic() error { if m.Height < 0 { @@ -296,7 +316,7 @@ func (m *VoteSetBitsMessage) ValidateBasic() error { return errors.New("invalid Type") } if err := m.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } // NOTE: Votes.Size() can be zero if the node does not have any diff --git a/internal/consensus/msgs_test.go b/internal/consensus/msgs_test.go index c22ebf5c0f..e85936820c 100644 --- a/internal/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "encoding/hex" "fmt" "math" @@ -24,6 +25,9 @@ import ( ) func TestMsgToProto(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + psh := types.PartSetHeader{ Total: 1, Hash: tmrand.Bytes(32), @@ -62,7 +66,7 @@ func TestMsgToProto(t *testing.T) { pbProposal := proposal.ToProto() pv := types.NewMockPV() - vote, err := factory.MakeVote(pv, factory.DefaultTestChainID, + vote, err := factory.MakeVote(ctx, pv, factory.DefaultTestChainID, 0, 1, 0, 2, types.BlockID{}, time.Now()) require.NoError(t, err) pbVote := vote.ToProto() @@ -313,7 +317,6 @@ func TestWALMsgProto(t *testing.T) { } } -// nolint:lll //ignore line length for tests func TestConsMsgsVectors(t *testing.T) { date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC) psh := types.PartSetHeader{ @@ -354,6 +357,11 @@ func TestConsMsgsVectors(t *testing.T) { } pbProposal := proposal.ToProto() + ext := types.VoteExtension{ + AppDataToSign: []byte("signed"), + AppDataSelfAuthenticating: []byte("auth"), + } + v := &types.Vote{ ValidatorAddress: 
[]byte("add_more_exclamation"), ValidatorIndex: 1, @@ -362,6 +370,7 @@ func TestConsMsgsVectors(t *testing.T) { Timestamp: date, Type: tmproto.PrecommitType, BlockID: bi, + VoteExtension: ext, } vpb := v.ToProto() @@ -398,7 +407,7 @@ func TestConsMsgsVectors(t *testing.T) { "2a36080110011a3008011204746573741a26080110011a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"}, {"Vote", &tmcons.Message{Sum: &tmcons.Message_Vote{ Vote: &tmcons.Vote{Vote: vpb}}}, - "32700a6e0802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e3801"}, + "3280010a7e0802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e38014a0e0a067369676e6564120461757468"}, {"HasVote", &tmcons.Message{Sum: &tmcons.Message_HasVote{ HasVote: &tmcons.HasVote{Height: 1, Round: 1, Type: tmproto.PrevoteType, Index: 1}}}, "3a080801100118012001"}, diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go new file mode 100644 index 0000000000..cc1e3babf8 --- /dev/null +++ b/internal/consensus/pbts_test.go @@ -0,0 +1,500 @@ +package consensus + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/libs/log" + tmtimemocks "github.com/tendermint/tendermint/libs/time/mocks" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +const ( + // blockTimeIota is used in the test harness as the time between + // blocks 
when not otherwise specified. + blockTimeIota = time.Millisecond +) + +// pbtsTestHarness constructs a Tendermint network that can be used for testing the +// implementation of the Proposer-Based timestamps algorithm. +// It runs a series of consensus heights and captures timing of votes and events. +type pbtsTestHarness struct { + // configuration options set by the user of the test harness. + pbtsTestConfiguration + + // The timestamp of the first block produced by the network. + firstBlockTime time.Time + + // The Tendermint consensus state machine being run during + // a run of the pbtsTestHarness. + observedState *State + + // A stub for signing votes and messages using the key + // from the observedState. + observedValidator *validatorStub + + // A list of simulated validators that interact with the observedState and are + // fully controlled by the test harness. + otherValidators []*validatorStub + + // The mock time source used by all of the validator stubs in the test harness. + // This mock clock allows the test harness to produce votes and blocks with arbitrary + // timestamps. + validatorClock *tmtimemocks.Source + + chainID string + + // channels for verifying that the observed validator completes certain actions. + ensureProposalCh, roundCh, blockCh, ensureVoteCh <-chan tmpubsub.Message + + // channel of events from the observed validator annotated with the timestamp + // the event was received. + eventCh <-chan timestampedEvent + + currentHeight int64 + currentRound int32 +} + +type pbtsTestConfiguration struct { + // The timestamp consensus parameters to be used by the state machine under test. + synchronyParams types.SynchronyParams + + // The setting to use for the TimeoutPropose configuration parameter. + timeoutPropose time.Duration + + // The genesis time + genesisTime time.Time + + // The times offset from height 1 block time of the block proposed at height 2. 
+ height2ProposedBlockOffset time.Duration + + // The time offset from height 1 block time at which the proposal at height 2 should be delivered. + height2ProposalTimeDeliveryOffset time.Duration + + // The time offset from height 1 block time of the block proposed at height 4. + // At height 4, the proposed block and the deliver offsets are the same so + // that timely-ness does not affect height 4. + height4ProposedBlockOffset time.Duration +} + +func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfiguration) pbtsTestHarness { + t.Helper() + const validators = 4 + cfg := configSetup(t) + clock := new(tmtimemocks.Source) + + if tc.genesisTime.IsZero() { + tc.genesisTime = time.Now() + } + + if tc.height4ProposedBlockOffset == 0 { + + // Set a default height4ProposedBlockOffset. + // Use a proposed block time that is greater than the time that the + // block at height 2 was delivered. Height 3 is not relevant for testing + // and always occurs blockTimeIota before height 4. If not otherwise specified, + // height 4 therefore occurs 2*blockTimeIota after height 2. + tc.height4ProposedBlockOffset = tc.height2ProposalTimeDeliveryOffset + 2*blockTimeIota + } + cfg.Consensus.TimeoutPropose = tc.timeoutPropose + consensusParams := types.DefaultConsensusParams() + consensusParams.Synchrony = tc.synchronyParams + + state, privVals := makeGenesisState(ctx, t, cfg, genesisStateArgs{ + Params: consensusParams, + Time: tc.genesisTime, + Validators: validators, + }) + cs := newState(ctx, t, log.TestingLogger(), state, privVals[0], kvstore.NewApplication()) + vss := make([]*validatorStub, validators) + for i := 0; i < validators; i++ { + vss[i] = newValidatorStub(privVals[i], int32(i)) + } + incrementHeight(vss[1:]...) 
+ + for _, vs := range vss { + vs.clock = clock + } + pubKey, err := vss[0].PrivValidator.GetPubKey(ctx) + require.NoError(t, err) + + eventCh := timestampedCollector(ctx, t, cs.eventBus) + + return pbtsTestHarness{ + pbtsTestConfiguration: tc, + observedValidator: vss[0], + observedState: cs, + otherValidators: vss[1:], + validatorClock: clock, + currentHeight: 1, + chainID: cfg.ChainID(), + roundCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound), + ensureProposalCh: subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal), + blockCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock), + ensureVoteCh: subscribeToVoterBuffered(ctx, t, cs, pubKey.Address()), + eventCh: eventCh, + } +} + +func (p *pbtsTestHarness) observedValidatorProposerHeight(ctx context.Context, t *testing.T, previousBlockTime time.Time) (heightResult, time.Time) { + p.validatorClock.On("Now").Return(p.genesisTime.Add(p.height2ProposedBlockOffset)).Times(6) + + ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound) + + timeout := time.Until(previousBlockTime.Add(ensureTimeout)) + ensureProposalWithTimeout(t, p.ensureProposalCh, p.currentHeight, p.currentRound, nil, timeout) + + rs := p.observedState.GetRoundState() + bid := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} + ensurePrevote(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + signAddVotes(ctx, t, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...) + + signAddVotes(ctx, t, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...) + ensurePrecommit(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + + ensureNewBlock(t, p.blockCh, p.currentHeight) + + vk, err := p.observedValidator.GetPubKey(ctx) + require.NoError(t, err) + res := collectHeightResults(ctx, t, p.eventCh, p.currentHeight, vk.Address()) + + p.currentHeight++ + incrementHeight(p.otherValidators...) 
+ return res, rs.ProposalBlock.Time +} + +func (p *pbtsTestHarness) height2(ctx context.Context, t *testing.T) heightResult { + signer := p.otherValidators[0].PrivValidator + return p.nextHeight(ctx, t, signer, + p.firstBlockTime.Add(p.height2ProposalTimeDeliveryOffset), + p.firstBlockTime.Add(p.height2ProposedBlockOffset), + p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota)) +} + +func (p *pbtsTestHarness) intermediateHeights(ctx context.Context, t *testing.T) { + signer := p.otherValidators[1].PrivValidator + p.nextHeight(ctx, t, signer, + p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota), + p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota), + p.firstBlockTime.Add(p.height4ProposedBlockOffset)) + + signer = p.otherValidators[2].PrivValidator + p.nextHeight(ctx, t, signer, + p.firstBlockTime.Add(p.height4ProposedBlockOffset), + p.firstBlockTime.Add(p.height4ProposedBlockOffset), + time.Now()) +} + +func (p *pbtsTestHarness) height5(ctx context.Context, t *testing.T) (heightResult, time.Time) { + return p.observedValidatorProposerHeight(ctx, t, p.firstBlockTime.Add(p.height4ProposedBlockOffset)) +} + +func (p *pbtsTestHarness) nextHeight(ctx context.Context, t *testing.T, proposer types.PrivValidator, deliverTime, proposedTime, nextProposedTime time.Time) heightResult { + p.validatorClock.On("Now").Return(nextProposedTime).Times(6) + + ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound) + + b, _, err := p.observedState.createProposalBlock() + require.NoError(t, err) + b.Height = p.currentHeight + b.Header.Height = p.currentHeight + b.Header.Time = proposedTime + + k, err := proposer.GetPubKey(ctx) + require.NoError(t, err) + b.Header.ProposerAddress = k.Address() + ps, err := b.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + bid := types.BlockID{Hash: b.Hash(), PartSetHeader: ps.Header()} + prop := types.NewProposal(p.currentHeight, 0, -1, bid, proposedTime) + tp := prop.ToProto() + + 
if err := proposer.SignProposal(ctx, p.observedState.state.ChainID, tp); err != nil { + t.Fatalf("error signing proposal: %s", err) + } + + time.Sleep(time.Until(deliverTime)) + prop.Signature = tp.Signature + if err := p.observedState.SetProposalAndBlock(ctx, prop, b, ps, "peerID"); err != nil { + t.Fatal(err) + } + ensureProposal(t, p.ensureProposalCh, p.currentHeight, 0, bid) + + ensurePrevote(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + signAddVotes(ctx, t, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...) + + signAddVotes(ctx, t, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...) + ensurePrecommit(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + + vk, err := p.observedValidator.GetPubKey(ctx) + require.NoError(t, err) + res := collectHeightResults(ctx, t, p.eventCh, p.currentHeight, vk.Address()) + ensureNewBlock(t, p.blockCh, p.currentHeight) + + p.currentHeight++ + incrementHeight(p.otherValidators...) + return res +} + +func timestampedCollector(ctx context.Context, t *testing.T, eb *eventbus.EventBus) <-chan timestampedEvent { + t.Helper() + + // Since eventCh is not read until the end of each height, it must be large + // enough to hold all of the events produced during a single height. 
+ eventCh := make(chan timestampedEvent, 100) + + if err := eb.Observe(ctx, func(msg tmpubsub.Message) error { + eventCh <- timestampedEvent{ + ts: time.Now(), + m: msg, + } + return nil + }, types.EventQueryVote, types.EventQueryCompleteProposal); err != nil { + t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err) + } + return eventCh +} + +func collectHeightResults(ctx context.Context, t *testing.T, eventCh <-chan timestampedEvent, height int64, address []byte) heightResult { + t.Helper() + var res heightResult + for event := range eventCh { + switch v := event.m.Data().(type) { + case types.EventDataVote: + if v.Vote.Height > height { + t.Fatalf("received prevote from unexpected height, expected: %d, saw: %d", height, v.Vote.Height) + } + if !bytes.Equal(address, v.Vote.ValidatorAddress) { + continue + } + if v.Vote.Type != tmproto.PrevoteType { + continue + } + res.prevote = v.Vote + res.prevoteIssuedAt = event.ts + + case types.EventDataCompleteProposal: + if v.Height > height { + t.Fatalf("received proposal from unexpected height, expected: %d, saw: %d", height, v.Height) + } + res.proposalIssuedAt = event.ts + } + if res.isComplete() { + return res + } + } + t.Fatalf("complete height result never seen for height %d", height) + + panic("unreachable") +} + +type timestampedEvent struct { + ts time.Time + m tmpubsub.Message +} + +func (p *pbtsTestHarness) run(ctx context.Context, t *testing.T) resultSet { + startTestRound(ctx, p.observedState, p.currentHeight, p.currentRound) + + r1, proposalBlockTime := p.observedValidatorProposerHeight(ctx, t, p.genesisTime) + p.firstBlockTime = proposalBlockTime + r2 := p.height2(ctx, t) + p.intermediateHeights(ctx, t) + r5, _ := p.height5(ctx, t) + return resultSet{ + genesisHeight: r1, + height2: r2, + height5: r5, + } +} + +type resultSet struct { + genesisHeight heightResult + height2 heightResult + height5 heightResult +} + +type heightResult struct { + proposalIssuedAt time.Time + prevote *types.Vote + 
prevoteIssuedAt time.Time +} + +func (hr heightResult) isComplete() bool { + return !hr.proposalIssuedAt.IsZero() && !hr.prevoteIssuedAt.IsZero() && hr.prevote != nil +} + +// TestProposerWaitsForGenesisTime tests that a proposer will not propose a block +// until after the genesis time has passed. The test sets the genesis time in the +// future and then ensures that the observed validator waits to propose a block. +func TestProposerWaitsForGenesisTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a genesis time far (enough) in the future. + initialTime := time.Now().Add(800 * time.Millisecond) + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 10 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 10 * time.Millisecond, + genesisTime: initialTime, + height2ProposalTimeDeliveryOffset: 10 * time.Millisecond, + height2ProposedBlockOffset: 10 * time.Millisecond, + height4ProposedBlockOffset: 30 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + // ensure that the proposal was issued after the genesis time. + assert.True(t, results.genesisHeight.proposalIssuedAt.After(cfg.genesisTime)) +} + +// TestProposerWaitsForPreviousBlock tests that the proposer of a block waits until +// the block time of the previous height has passed to propose the next block. +// The test harness ensures that the observed validator will be the proposer at +// height 1 and height 5. The test sets the block time of height 4 in the future +// and then verifies that the observed validator waits until after the block time +// of height 4 to propose a block at height 5. 
+func TestProposerWaitsForPreviousBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + initialTime := time.Now().Add(time.Millisecond * 50) + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 100 * time.Millisecond, + MessageDelay: 500 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + genesisTime: initialTime, + height2ProposalTimeDeliveryOffset: 150 * time.Millisecond, + height2ProposedBlockOffset: 100 * time.Millisecond, + height4ProposedBlockOffset: 800 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + // the observed validator is the proposer at height 5. + // ensure that the observed validator did not propose a block until after + // the time configured for height 4. + assert.True(t, results.height5.proposalIssuedAt.After(pbtsTest.firstBlockTime.Add(cfg.height4ProposedBlockOffset))) + + // Ensure that the validator issued a prevote for a non-nil block. 
+ assert.NotNil(t, results.height5.prevote.BlockID.Hash) +} + +func TestProposerWaitTime(t *testing.T) { + genesisTime, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z") + require.NoError(t, err) + testCases := []struct { + name string + previousBlockTime time.Time + localTime time.Time + expectedWait time.Duration + }{ + { + name: "block time greater than local time", + previousBlockTime: genesisTime.Add(5 * time.Nanosecond), + localTime: genesisTime.Add(1 * time.Nanosecond), + expectedWait: 4 * time.Nanosecond, + }, + { + name: "local time greater than block time", + previousBlockTime: genesisTime.Add(1 * time.Nanosecond), + localTime: genesisTime.Add(5 * time.Nanosecond), + expectedWait: 0, + }, + { + name: "both times equal", + previousBlockTime: genesisTime.Add(5 * time.Nanosecond), + localTime: genesisTime.Add(5 * time.Nanosecond), + expectedWait: 0, + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + mockSource := new(tmtimemocks.Source) + mockSource.On("Now").Return(testCase.localTime) + + ti := proposerWaitTime(mockSource, testCase.previousBlockTime) + assert.Equal(t, testCase.expectedWait, ti) + }) + } +} + +func TestTimelyProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialTime := time.Now() + + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 10 * time.Millisecond, + MessageDelay: 140 * time.Millisecond, + }, + timeoutPropose: 40 * time.Millisecond, + genesisTime: initialTime, + height2ProposedBlockOffset: 15 * time.Millisecond, + height2ProposalTimeDeliveryOffset: 30 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + require.NotNil(t, results.height2.prevote.BlockID.Hash) +} + +func TestTooFarInThePastProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // localtime > proposedBlockTime + MsgDelay + 
Precision + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 1 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + height2ProposedBlockOffset: 15 * time.Millisecond, + height2ProposalTimeDeliveryOffset: 27 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + require.Nil(t, results.height2.prevote.BlockID.Hash) +} + +func TestTooFarInTheFutureProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // localtime < proposedBlockTime - Precision + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 1 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + height2ProposedBlockOffset: 100 * time.Millisecond, + height2ProposalTimeDeliveryOffset: 10 * time.Millisecond, + height4ProposedBlockOffset: 150 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + require.Nil(t, results.height2.prevote.BlockID.Hash) +} diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index 73e61f21c5..77769937db 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -1,15 +1,14 @@ package consensus import ( + "encoding/json" "errors" "fmt" "sync" "time" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -23,8 +22,8 @@ var ( // peerStateStats holds internal statistics for a peer. 
type peerStateStats struct { - Votes int `json:"votes"` - BlockParts int `json:"block_parts"` + Votes int `json:"votes,string"` + BlockParts int `json:"block_parts,string"` } func (pss peerStateStats) String() string { @@ -40,13 +39,10 @@ type PeerState struct { logger log.Logger // NOTE: Modify below using setters, never directly. - mtx tmsync.RWMutex + mtx sync.RWMutex running bool PRS cstypes.PeerRoundState `json:"round_state"` Stats *peerStateStats `json:"stats"` - - broadcastWG sync.WaitGroup - closer *tmsync.Closer } // NewPeerState returns a new PeerState for the given node ID. @@ -54,7 +50,6 @@ func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState { return &PeerState{ peerID: peerID, logger: logger, - closer: tmsync.NewCloser(), PRS: cstypes.PeerRoundState{ Round: -1, ProposalPOLRound: -1, @@ -96,8 +91,7 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { func (ps *PeerState) ToJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - - return tmjson.Marshal(ps) + return json.Marshal(ps) } // GetHeight returns an atomic snapshot of the PeerRoundState's height used by @@ -193,7 +187,10 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, boo } if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { - return votes.GetByIndex(int32(index)), true + vote := votes.GetByIndex(int32(index)) + if vote != nil { + return vote, true + } } return nil, false @@ -358,6 +355,9 @@ func (ps *PeerState) BlockPartsSent() int { // SetHasVote sets the given vote as known by the peer func (ps *PeerState) SetHasVote(vote *types.Vote) { + if vote == nil { + return + } ps.mtx.Lock() defer ps.mtx.Unlock() diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 62517fd4fe..53b0261130 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1,18 +1,22 @@ package consensus import ( + "context" + "errors" "fmt" "runtime/debug" + "sync" "time" cstypes 
"github.com/tendermint/tendermint/internal/consensus/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + tmtime "github.com/tendermint/tendermint/libs/time" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -25,9 +29,9 @@ var ( // GetChannelDescriptor produces an instance of a descriptor for this // package's required channels. -func GetChannelDescriptors() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { +func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor { + return map[p2p.ChannelID]*p2p.ChannelDescriptor{ + StateChannel: { ID: StateChannel, MessageType: new(tmcons.Message), Priority: 8, @@ -35,7 +39,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor { RecvMessageCapacity: maxMsgSize, RecvBufferCapacity: 128, }, - { + DataChannel: { // TODO: Consider a split between gossiping current block and catchup // stuff. Once we gossip the whole block there is nothing left to send // until next height or round. 
@@ -46,7 +50,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor { RecvBufferCapacity: 512, RecvMessageCapacity: maxMsgSize, }, - { + VoteChannel: { ID: VoteChannel, MessageType: new(tmcons.Message), Priority: 10, @@ -54,7 +58,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor { RecvBufferCapacity: 128, RecvMessageCapacity: maxMsgSize, }, - { + VoteSetBitsChannel: { ID: VoteSetBitsChannel, MessageType: new(tmcons.Message), Priority: 5, @@ -79,12 +83,10 @@ const ( listenerIDConsensus = "consensus-reactor" ) -type ReactorOption func(*Reactor) - // NOTE: Temporary interface for switching to block sync, we should get rid of v0. // See: https://github.com/tendermint/tendermint/issues/4595 type BlockSyncReactor interface { - SwitchToBlockSync(sm.State) error + SwitchToBlockSync(context.Context, sm.State) error GetMaxPeerBlockHeight() int64 @@ -108,27 +110,22 @@ type ConsSyncReactor interface { // Reactor defines a reactor for the consensus service. type Reactor struct { service.BaseService + logger log.Logger state *State - eventBus *types.EventBus + eventBus *eventbus.EventBus Metrics *Metrics - mtx tmsync.RWMutex - peers map[types.NodeID]*PeerState - waitSync bool + mtx sync.RWMutex + peers map[types.NodeID]*PeerState + waitSync bool + readySignal chan struct{} // closed when the node is ready to start consensus stateCh *p2p.Channel dataCh *p2p.Channel voteCh *p2p.Channel voteSetBitsCh *p2p.Channel peerUpdates *p2p.PeerUpdates - - // NOTE: We need a dedicated stateCloseCh channel for signaling closure of - // the StateChannel due to the fact that the StateChannel message handler - // performs a send on the VoteSetBitsChannel. This is an antipattern, so having - // this dedicated channel,stateCloseCh, is necessary in order to avoid data races. 
- stateCloseCh chan struct{} - closeCh chan struct{} } // NewReactor returns a reference to a new consensus reactor, which implements @@ -136,65 +133,82 @@ type Reactor struct { // to relevant p2p Channels and a channel to listen for peer updates on. The // reactor will close all p2p Channels when stopping. func NewReactor( + ctx context.Context, logger log.Logger, cs *State, - stateCh *p2p.Channel, - dataCh *p2p.Channel, - voteCh *p2p.Channel, - voteSetBitsCh *p2p.Channel, + channelCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, waitSync bool, - options ...ReactorOption, -) *Reactor { + metrics *Metrics, +) (*Reactor, error) { + chans := getChannelDescriptors() + stateCh, err := channelCreator(ctx, chans[StateChannel]) + if err != nil { + return nil, err + } + dataCh, err := channelCreator(ctx, chans[DataChannel]) + if err != nil { + return nil, err + } + + voteCh, err := channelCreator(ctx, chans[VoteChannel]) + if err != nil { + return nil, err + } + + voteSetBitsCh, err := channelCreator(ctx, chans[VoteSetBitsChannel]) + if err != nil { + return nil, err + } r := &Reactor{ + logger: logger, state: cs, waitSync: waitSync, peers: make(map[types.NodeID]*PeerState), - Metrics: NopMetrics(), + Metrics: metrics, stateCh: stateCh, dataCh: dataCh, voteCh: voteCh, voteSetBitsCh: voteSetBitsCh, peerUpdates: peerUpdates, - stateCloseCh: make(chan struct{}), - closeCh: make(chan struct{}), + readySignal: make(chan struct{}), } r.BaseService = *service.NewBaseService(logger, "Consensus", r) - for _, opt := range options { - opt(r) + if !r.waitSync { + close(r.readySignal) } - return r + return r, nil } // OnStart starts separate go routines for each p2p Channel and listens for // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. 
-func (r *Reactor) OnStart() error { - r.Logger.Debug("consensus wait sync", "wait_sync", r.WaitSync()) +func (r *Reactor) OnStart(ctx context.Context) error { + r.logger.Debug("consensus wait sync", "wait_sync", r.WaitSync()) // start routine that computes peer statistics for evaluating peer quality // // TODO: Evaluate if we need this to be synchronized via WaitGroup as to not // leak the goroutine when stopping the reactor. - go r.peerStatsRoutine() + go r.peerStatsRoutine(ctx) r.subscribeToBroadcastEvents() if !r.WaitSync() { - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { return err } } - go r.processStateCh() - go r.processDataCh() - go r.processVoteCh() - go r.processVoteSetBitsCh() - go r.processPeerUpdates() + go r.processStateCh(ctx) + go r.processDataCh(ctx) + go r.processVoteCh(ctx) + go r.processVoteSetBitsCh(ctx) + go r.processPeerUpdates(ctx) return nil } @@ -203,47 +217,21 @@ func (r *Reactor) OnStart() error { // blocking until they all exit, as well as unsubscribing from events and stopping // state. func (r *Reactor) OnStop() { - r.unsubscribeFromBroadcastEvents() if err := r.state.Stop(); err != nil { - r.Logger.Error("failed to stop consensus state", "err", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + r.logger.Error("failed to stop consensus state", "err", err) + } } if !r.WaitSync() { r.state.Wait() } - - r.mtx.Lock() - // Close and wait for each of the peers to shutdown. - // This is safe to perform with the lock since none of the peers require the - // lock to complete any of the methods that the waitgroup is waiting on. - for _, state := range r.peers { - state.closer.Close() - state.broadcastWG.Wait() - } - r.mtx.Unlock() - - // Close the StateChannel goroutine separately since it uses its own channel - // to signal closure. - close(r.stateCloseCh) - <-r.stateCh.Done() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). 
- close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.voteSetBitsCh.Done() - <-r.dataCh.Done() - <-r.voteCh.Done() - <-r.peerUpdates.Done() } // SetEventBus sets the reactor's event bus. -func (r *Reactor) SetEventBus(b *types.EventBus) { +func (r *Reactor) SetEventBus(b *eventbus.EventBus) { r.eventBus = b r.state.SetEventBus(b) } @@ -256,15 +244,10 @@ func (r *Reactor) WaitSync() bool { return r.waitSync } -// ReactorMetrics sets the reactor's metrics as an option function. -func ReactorMetrics(metrics *Metrics) ReactorOption { - return func(r *Reactor) { r.Metrics = metrics } -} - // SwitchToConsensus switches from block-sync mode to consensus mode. It resets // the state, turns off block-sync, and starts the consensus state-machine. -func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { - r.Logger.Info("switching to consensus") +func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) { + r.logger.Info("switching to consensus") // we have no votes, so reconstruct LastCommit from SeenCommit if state.LastBlockHeight > 0 { @@ -273,10 +256,11 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a // NewRoundStepMessage. 
- r.state.updateToState(state) + r.state.updateToState(ctx, state) r.mtx.Lock() r.waitSync = false + close(r.readySignal) r.mtx.Unlock() r.Metrics.BlockSyncing.Set(0) @@ -286,7 +270,7 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { r.state.doWALCatchup = false } - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { panic(fmt.Sprintf(`failed to start consensus state: %v conS: @@ -297,8 +281,8 @@ conR: } d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil { - r.Logger.Error("failed to emit the blocksync complete event", "err", err) + if err := r.eventBus.PublishEventBlockSyncStatus(ctx, d); err != nil { + r.logger.Error("failed to emit the blocksync complete event", "err", err) } } @@ -337,16 +321,16 @@ func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) { return ps, ok } -func (r *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { - r.stateCh.Out <- p2p.Envelope{ +func (r *Reactor) broadcastNewRoundStepMessage(ctx context.Context, rs *cstypes.RoundState) error { + return r.stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: makeRoundStepMessage(rs), - } + }) } -func (r *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { +func (r *Reactor) broadcastNewValidBlockMessage(ctx context.Context, rs *cstypes.RoundState) error { psHeader := rs.ProposalBlockParts.Header() - r.stateCh.Out <- p2p.Envelope{ + return r.stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: &tmcons.NewValidBlock{ Height: rs.Height, @@ -355,11 +339,11 @@ func (r *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { BlockParts: rs.ProposalBlockParts.BitArray().ToProto(), IsCommit: rs.Step == cstypes.RoundStepCommit, }, - } + }) } -func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) { - r.stateCh.Out <- p2p.Envelope{ +func (r *Reactor) broadcastHasVoteMessage(ctx 
context.Context, vote *types.Vote) error { + return r.stateCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: &tmcons.HasVote{ Height: vote.Height, @@ -367,7 +351,7 @@ func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) { Type: vote.Type, Index: vote.ValidatorIndex, }, - } + }) } // subscribeToBroadcastEvents subscribes for new round steps and votes using the @@ -377,38 +361,44 @@ func (r *Reactor) subscribeToBroadcastEvents() { err := r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventNewRoundStepValue, - func(data tmevents.EventData) { - r.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) + func(ctx context.Context, data tmevents.EventData) error { + if err := r.broadcastNewRoundStepMessage(ctx, data.(*cstypes.RoundState)); err != nil { + return err + } select { case r.state.onStopCh <- data.(*cstypes.RoundState): + return nil + case <-ctx.Done(): + return ctx.Err() default: + return nil } }, ) if err != nil { - r.Logger.Error("failed to add listener for events", "err", err) + r.logger.Error("failed to add listener for events", "err", err) } err = r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventValidBlockValue, - func(data tmevents.EventData) { - r.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) + func(ctx context.Context, data tmevents.EventData) error { + return r.broadcastNewValidBlockMessage(ctx, data.(*cstypes.RoundState)) }, ) if err != nil { - r.Logger.Error("failed to add listener for events", "err", err) + r.logger.Error("failed to add listener for events", "err", err) } err = r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventVoteValue, - func(data tmevents.EventData) { - r.broadcastHasVoteMessage(data.(*types.Vote)) + func(ctx context.Context, data tmevents.EventData) error { + return r.broadcastHasVoteMessage(ctx, data.(*types.Vote)) }, ) if err != nil { - r.Logger.Error("failed to add listener for events", "err", err) + r.logger.Error("failed to add listener for events", 
"err", err) } } @@ -426,17 +416,15 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep { } } -func (r *Reactor) sendNewRoundStepMessage(peerID types.NodeID) { - rs := r.state.GetRoundState() - msg := makeRoundStepMessage(rs) - r.stateCh.Out <- p2p.Envelope{ +func (r *Reactor) sendNewRoundStepMessage(ctx context.Context, peerID types.NodeID) error { + return r.stateCh.Send(ctx, p2p.Envelope{ To: peerID, - Message: msg, - } + Message: makeRoundStepMessage(r.state.GetRoundState()), + }) } -func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) { - logger := r.Logger.With("height", prs.Height).With("peer", ps.peerID) +func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) { + logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { // ensure that the peer's PartSetHeader is correct @@ -484,14 +472,14 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer } logger.Debug("sending block part for catchup", "round", prs.Round, "index", index) - r.dataCh.Out <- p2p.Envelope{ + _ = r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.BlockPart{ Height: prs.Height, // not our height, so it does not matter. 
Round: prs.Round, // not our height, so it does not matter Part: *partProto, }, - } + }) return } @@ -499,10 +487,11 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer time.Sleep(r.state.config.PeerGossipSleepDuration) } -func (r *Reactor) gossipDataRoutine(ps *PeerState) { - logger := r.Logger.With("peer", ps.peerID) +func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState) { + logger := r.logger.With("peer", ps.peerID) - defer ps.broadcastWG.Done() + timer := time.NewTimer(0) + defer timer.Stop() OUTER_LOOP: for { @@ -511,11 +500,8 @@ OUTER_LOOP: } select { - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. + case <-ctx.Done(): return - default: } @@ -533,13 +519,15 @@ OUTER_LOOP: } logger.Debug("sending block part", "height", prs.Height, "round", prs.Round) - r.dataCh.Out <- p2p.Envelope{ + if err := r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.BlockPart{ Height: rs.Height, // this tells peer that this part applies to us Round: rs.Round, // this tells peer that this part applies to us Part: *partProto, }, + }); err != nil { + return } ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) @@ -562,7 +550,13 @@ OUTER_LOOP: "blockstoreBase", blockStoreBase, "blockstoreHeight", r.state.blockStore.Height(), ) - time.Sleep(r.state.config.PeerGossipSleepDuration) + + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } } else { ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) } @@ -572,13 +566,18 @@ OUTER_LOOP: continue OUTER_LOOP } - r.gossipDataForCatchup(rs, prs, ps) + r.gossipDataForCatchup(ctx, rs, prs, ps) continue OUTER_LOOP } // if height and round don't match, sleep if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - time.Sleep(r.state.config.PeerGossipSleepDuration) + 
timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } continue OUTER_LOOP } @@ -594,11 +593,13 @@ OUTER_LOOP: propProto := rs.Proposal.ToProto() logger.Debug("sending proposal", "height", prs.Height, "round", prs.Round) - r.dataCh.Out <- p2p.Envelope{ + if err := r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.Proposal{ Proposal: *propProto, }, + }); err != nil { + return } // NOTE: A peer might have received a different proposal message, so @@ -615,13 +616,15 @@ OUTER_LOOP: pPolProto := pPol.ToProto() logger.Debug("sending POL", "height", prs.Height, "round", prs.Round) - r.dataCh.Out <- p2p.Envelope{ + if err := r.dataCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.ProposalPOL{ Height: rs.Height, ProposalPolRound: rs.Proposal.POLRound, ProposalPol: *pPolProto, }, + }); err != nil { + return } } @@ -629,96 +632,123 @@ OUTER_LOOP: } // nothing to do -- sleep - time.Sleep(r.state.config.PeerGossipSleepDuration) + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } continue OUTER_LOOP } } // pickSendVote picks a vote and sends it to the peer. It will return true if // there is a vote to send and false otherwise. 
-func (r *Reactor) pickSendVote(ps *PeerState, votes types.VoteSetReader) bool { - if vote, ok := ps.PickVoteToSend(votes); ok { - r.Logger.Debug("sending vote message", "ps", ps, "vote", vote) - r.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: vote.ToProto(), - }, - } +func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader) (bool, error) { + vote, ok := ps.PickVoteToSend(votes) + if !ok { + return false, nil + } - ps.SetHasVote(vote) - return true + r.logger.Debug("sending vote message", "ps", ps, "vote", vote) + if err := r.voteCh.Send(ctx, p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: vote.ToProto(), + }, + }); err != nil { + return false, err } - return false + ps.SetHasVote(vote) + return true, nil } -func (r *Reactor) gossipVotesForHeight(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool { - logger := r.Logger.With("height", prs.Height).With("peer", ps.peerID) +func (r *Reactor) gossipVotesForHeight( + ctx context.Context, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, + ps *PeerState, +) (bool, error) { + logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) // if there are lastCommits to send... if prs.Step == cstypes.RoundStepNewHeight { - if r.pickSendVote(ps, rs.LastCommit) { + if ok, err := r.pickSendVote(ctx, ps, rs.LastCommit); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.LastCommit to send") - return true + return true, nil + } } // if there are POL prevotes to send... 
if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if r.pickSendVote(ps, polPrevotes) { + if ok, err := r.pickSendVote(ctx, ps, polPrevotes); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) - return true + return true, nil } } } // if there are prevotes to send... if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ps, rs.Votes.Prevotes(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round)); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are precommits to send... if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ps, rs.Votes.Precommits(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Precommits(prs.Round)); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Precommits(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are prevotes to send...(which are needed because of validBlock mechanism) if prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ps, rs.Votes.Prevotes(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round)); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are POLPrevotes to send... 
if prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if r.pickSendVote(ps, polPrevotes) { + if ok, err := r.pickSendVote(ctx, ps, polPrevotes); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) - return true + return true, nil } } } - return false + return false, nil } -func (r *Reactor) gossipVotesRoutine(ps *PeerState) { - logger := r.Logger.With("peer", ps.peerID) - - defer ps.broadcastWG.Done() +func (r *Reactor) gossipVotesRoutine(ctx context.Context, ps *PeerState) { + logger := r.logger.With("peer", ps.peerID) // XXX: simple hack to throttle logs upon sleep logThrottle := 0 + timer := time.NewTimer(0) + defer timer.Stop() + OUTER_LOOP: for { if !r.IsRunning() { @@ -726,11 +756,8 @@ OUTER_LOOP: } select { - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. + case <-ctx.Done(): return - default: } @@ -746,14 +773,18 @@ OUTER_LOOP: // if height matches, then send LastCommit, Prevotes, and Precommits if rs.Height == prs.Height { - if r.gossipVotesForHeight(rs, prs, ps) { + if ok, err := r.gossipVotesForHeight(ctx, rs, prs, ps); err != nil { + return + } else if ok { continue OUTER_LOOP } } // special catchup logic -- if peer is lagging by height 1, send LastCommit if prs.Height != 0 && rs.Height == prs.Height+1 { - if r.pickSendVote(ps, rs.LastCommit) { + if ok, err := r.pickSendVote(ctx, ps, rs.LastCommit); err != nil { + return + } else if ok { logger.Debug("picked rs.LastCommit to send", "height", prs.Height) continue OUTER_LOOP } @@ -765,7 +796,9 @@ OUTER_LOOP: // Load the block commit for prs.Height, which contains precommit // signatures for prs.Height. 
if commit := r.state.blockStore.LoadBlockCommit(prs.Height); commit != nil { - if r.pickSendVote(ps, commit) { + if ok, err := r.pickSendVote(ctx, ps, commit); err != nil { + return + } else if ok { logger.Debug("picked Catchup commit to send", "height", prs.Height) continue OUTER_LOOP } @@ -786,15 +819,21 @@ OUTER_LOOP: logThrottle = 1 } - time.Sleep(r.state.config.PeerGossipSleepDuration) + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-ctx.Done(): + return + case <-timer.C: + } continue OUTER_LOOP } } // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. -func (r *Reactor) queryMaj23Routine(ps *PeerState) { - defer ps.broadcastWG.Done() +func (r *Reactor) queryMaj23Routine(ctx context.Context, ps *PeerState) { + timer := time.NewTimer(0) + defer timer.Stop() OUTER_LOOP: for { @@ -803,11 +842,8 @@ OUTER_LOOP: } select { - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. 
+ case <-ctx.Done(): return - default: } @@ -818,7 +854,7 @@ OUTER_LOOP: if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -826,9 +862,16 @@ OUTER_LOOP: Type: tmproto.PrevoteType, BlockID: maj23.ToProto(), }, + }); err != nil { + return } - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) + timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } } } } @@ -840,7 +883,7 @@ OUTER_LOOP: if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -848,9 +891,16 @@ OUTER_LOOP: Type: tmproto.PrecommitType, BlockID: maj23.ToProto(), }, + }); err != nil { + return } - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) + select { + case <-timer.C: + timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) + case <-ctx.Done(): + return + } } } } @@ -862,7 +912,7 @@ OUTER_LOOP: if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -870,9 +920,16 @@ OUTER_LOOP: Type: tmproto.PrevoteType, BlockID: maj23.ToProto(), }, + }); err != nil { + return } - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) + timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } } } } @@ -887,7 +944,7 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= r.state.blockStore.Height() && prs.Height >= r.state.blockStore.Base() { if 
commit := r.state.LoadCommit(prs.Height); commit != nil { - r.stateCh.Out <- p2p.Envelope{ + if err := r.stateCh.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: &tmcons.VoteSetMaj23{ Height: prs.Height, @@ -895,14 +952,27 @@ OUTER_LOOP: Type: tmproto.PrecommitType, BlockID: commit.BlockID.ToProto(), }, + }); err != nil { + return } - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) + timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } } } } - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) + timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } + continue OUTER_LOOP } } @@ -912,8 +982,8 @@ OUTER_LOOP: // be the case, and we spawn all the relevant goroutine to broadcast messages to // the peer. During peer removal, we remove the peer for our set of peers and // signal to all spawned goroutines to gracefully exit in a non-blocking manner. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -922,20 +992,14 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: // Do not allow starting new broadcasting goroutines after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer goroutines and closed r.closeCh, but the router still sends in-flight - // peer updates. + // peer goroutines, but the router still sends in-flight peer updates. 
if !r.IsRunning() { return } - var ( - ps *PeerState - ok bool - ) - - ps, ok = r.peers[peerUpdate.NodeID] + ps, ok := r.peers[peerUpdate.NodeID] if !ok { - ps = NewPeerState(r.Logger, peerUpdate.NodeID) + ps = NewPeerState(r.logger, peerUpdate.NodeID) r.peers[peerUpdate.NodeID] = ps } @@ -944,32 +1008,38 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // when the peer is removed. We also set the running state to ensure we // do not spawn multiple instances of the same goroutines and finally we // set the waitgroup counter so we know when all goroutines have exited. - ps.broadcastWG.Add(3) ps.SetRunning(true) - // start goroutines for this peer - go r.gossipDataRoutine(ps) - go r.gossipVotesRoutine(ps) - go r.queryMaj23Routine(ps) + go func() { + select { + case <-ctx.Done(): + return + case <-r.readySignal: + } + // do nothing if the peer has + // stopped while we've been waiting. + if !ps.IsRunning() { + return + } + // start goroutines for this peer + go r.gossipDataRoutine(ctx, ps) + go r.gossipVotesRoutine(ctx, ps) + go r.queryMaj23Routine(ctx, ps) + + // Send our state to the peer. If we're block-syncing, broadcast a + // RoundStepMessage later upon SwitchToConsensus(). + if !r.WaitSync() { + go func() { _ = r.sendNewRoundStepMessage(ctx, ps.peerID) }() + } - // Send our state to the peer. If we're block-syncing, broadcast a - // RoundStepMessage later upon SwitchToConsensus(). - if !r.waitSync { - go r.sendNewRoundStepMessage(ps.peerID) - } + }() } case p2p.PeerStatusDown: ps, ok := r.peers[peerUpdate.NodeID] if ok && ps.IsRunning() { // signal to all spawned goroutines for the peer to gracefully exit - ps.closer.Close() - go func() { - // Wait for all spawned broadcast goroutines to exit before marking the - // peer state as no longer running and removal from the peers map. 
- ps.broadcastWG.Wait() - r.mtx.Lock() delete(r.peers, peerUpdate.NodeID) r.mtx.Unlock() @@ -985,10 +1055,10 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // If we fail to find the peer state for the envelope sender, we perform a no-op // and return. This can happen when we process the envelope after the peer is // removed. -func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") + r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") return nil } @@ -999,7 +1069,7 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error r.state.mtx.RUnlock() if err := msgI.(*NewRoundStepMessage).ValidateHeight(initialHeight); err != nil { - r.Logger.Error("peer sent us an invalid msg", "msg", msg, "err", err) + r.logger.Error("peer sent us an invalid msg", "msg", msg, "err", err) return err } @@ -1053,9 +1123,11 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error eMsg.Votes = *votesProto } - r.voteSetBitsCh.Out <- p2p.Envelope{ + if err := r.voteSetBitsCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: eMsg, + }); err != nil { + return err } default: @@ -1069,12 +1141,12 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { - logger := r.Logger.With("peer", envelope.From, "ch_id", "DataChannel") +func (r *Reactor) handleDataMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { + logger := r.logger.With("peer", envelope.From, "ch_id", "DataChannel") ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state") + r.logger.Debug("failed to find peer state") return nil } @@ -1088,17 +1160,24 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { pMsg := msgI.(*ProposalMessage) ps.SetHasProposal(pMsg.Proposal) - r.state.peerMsgQueue <- msgInfo{pMsg, envelope.From} - + select { + case <-ctx.Done(): + return ctx.Err() + case r.state.peerMsgQueue <- msgInfo{pMsg, envelope.From, tmtime.Now()}: + } case *tmcons.ProposalPOL: ps.ApplyProposalPOLMessage(msgI.(*ProposalPOLMessage)) - case *tmcons.BlockPart: bpMsg := msgI.(*BlockPartMessage) ps.SetHasProposalBlockPart(bpMsg.Height, bpMsg.Round, int(bpMsg.Part.Index)) r.Metrics.BlockParts.With("peer_id", string(envelope.From)).Add(1) - r.state.peerMsgQueue <- msgInfo{bpMsg, envelope.From} + select { + case r.state.peerMsgQueue <- msgInfo{bpMsg, envelope.From, tmtime.Now()}: + return nil + case <-ctx.Done(): + return ctx.Err() + } default: return fmt.Errorf("received unknown message on DataChannel: %T", msg) @@ -1111,12 +1190,12 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. 
-func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { - logger := r.Logger.With("peer", envelope.From, "ch_id", "VoteChannel") +func (r *Reactor) handleVoteMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { + logger := r.logger.With("peer", envelope.From, "ch_id", "VoteChannel") ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state") + r.logger.Debug("failed to find peer state") return nil } @@ -1137,25 +1216,27 @@ func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(vMsg.Vote) - r.state.peerMsgQueue <- msgInfo{vMsg, envelope.From} - + select { + case r.state.peerMsgQueue <- msgInfo{vMsg, envelope.From, tmtime.Now()}: + return nil + case <-ctx.Done(): + return ctx.Err() + } default: return fmt.Errorf("received unknown message on VoteChannel: %T", msg) } - - return nil } // handleVoteSetBitsMessage handles envelopes sent from peers on the // VoteSetBitsChannel. If we fail to find the peer state for the envelope sender, // we perform a no-op and return. This can happen when we process the envelope // after the peer is removed. -func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) error { - logger := r.Logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") +func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { + logger := r.logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state") + r.logger.Debug("failed to find peer state") return nil } @@ -1208,11 +1289,11 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) // the p2p channel. // // NOTE: We block on consensus state for proposals, block parts, and votes. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -1235,20 +1316,20 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err return err } - r.Logger.Debug("received message", "ch_id", chID, "message", msgI, "peer", envelope.From) + r.logger.Debug("received message", "ch_id", chID, "message", msgI, "peer", envelope.From) switch chID { case StateChannel: - err = r.handleStateMessage(envelope, msgI) + err = r.handleStateMessage(ctx, envelope, msgI) case DataChannel: - err = r.handleDataMessage(envelope, msgI) + err = r.handleDataMessage(ctx, envelope, msgI) case VoteChannel: - err = r.handleVoteMessage(envelope, msgI) + err = r.handleVoteMessage(ctx, envelope, msgI) case VoteSetBitsChannel: - err = r.handleVoteSetBitsMessage(envelope, msgI) + err = r.handleVoteSetBitsMessage(ctx, envelope, msgI) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -1262,23 +1343,18 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // execution will result in a PeerError being sent on the StateChannel. When // the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
-func (r *Reactor) processStateCh() { - defer r.stateCh.Close() - - for { - select { - case envelope := <-r.stateCh.In: - if err := r.handleMessage(r.stateCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.stateCh.ID, "envelope", envelope, "err", err) - r.stateCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processStateCh(ctx context.Context) { + iter := r.stateCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.stateCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.stateCh.ID, "envelope", envelope, "err", err) + if serr := r.stateCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.stateCloseCh: - r.Logger.Debug("stopped listening on StateChannel; closing...") - return } } } @@ -1288,23 +1364,18 @@ func (r *Reactor) processStateCh() { // execution will result in a PeerError being sent on the DataChannel. When // the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
-func (r *Reactor) processDataCh() { - defer r.dataCh.Close() - - for { - select { - case envelope := <-r.dataCh.In: - if err := r.handleMessage(r.dataCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.dataCh.ID, "envelope", envelope, "err", err) - r.dataCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processDataCh(ctx context.Context) { + iter := r.dataCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.dataCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.dataCh.ID, "envelope", envelope, "err", err) + if serr := r.dataCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on DataChannel; closing...") - return } } } @@ -1314,23 +1385,18 @@ func (r *Reactor) processDataCh() { // execution will result in a PeerError being sent on the VoteChannel. When // the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. 
-func (r *Reactor) processVoteCh() { - defer r.voteCh.Close() - - for { - select { - case envelope := <-r.voteCh.In: - if err := r.handleMessage(r.voteCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.voteCh.ID, "envelope", envelope, "err", err) - r.voteCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processVoteCh(ctx context.Context) { + iter := r.voteCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.voteCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.voteCh.ID, "envelope", envelope, "err", err) + if serr := r.voteCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on VoteChannel; closing...") - return } } } @@ -1340,23 +1406,23 @@ func (r *Reactor) processVoteCh() { // execution will result in a PeerError being sent on the VoteSetBitsChannel. // When the reactor is stopped, we will catch the signal and close the p2p // Channel gracefully. 
-func (r *Reactor) processVoteSetBitsCh() { - defer r.voteSetBitsCh.Close() - - for { - select { - case envelope := <-r.voteSetBitsCh.In: - if err := r.handleMessage(r.voteSetBitsCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.voteSetBitsCh.ID, "envelope", envelope, "err", err) - r.voteSetBitsCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processVoteSetBitsCh(ctx context.Context) { + iter := r.voteSetBitsCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + + if err := r.handleMessage(ctx, r.voteSetBitsCh.ID, envelope); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return } - case <-r.closeCh: - r.Logger.Debug("stopped listening on VoteSetBitsChannel; closing...") - return + r.logger.Error("failed to process message", "ch_id", r.voteSetBitsCh.ID, "envelope", envelope, "err", err) + if serr := r.voteSetBitsCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return + } } } } @@ -1364,25 +1430,21 @@ func (r *Reactor) processVoteSetBitsCh() { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. 
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-r.peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate) } } } -func (r *Reactor) peerStatsRoutine() { +func (r *Reactor) peerStatsRoutine(ctx context.Context) { for { if !r.IsRunning() { - r.Logger.Info("stopping peerStatsRoutine") + r.logger.Info("stopping peerStatsRoutine") return } @@ -1390,14 +1452,14 @@ func (r *Reactor) peerStatsRoutine() { case msg := <-r.state.statsMsgQueue: ps, ok := r.GetPeerState(msg.PeerID) if !ok || ps == nil { - r.Logger.Debug("attempt to update stats for non-existent peer", "peer", msg.PeerID) + r.logger.Debug("attempt to update stats for non-existent peer", "peer", msg.PeerID) continue } switch msg.Msg.(type) { case *VoteMessage: if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + r.peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) @@ -1405,13 +1467,13 @@ func (r *Reactor) peerStatsRoutine() { case *BlockPartMessage: if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + r.peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) } } - case <-r.closeCh: + case <-ctx.Done(): return } } diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 16fa139694..3e1c930d99 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" 
"github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -19,17 +20,16 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/encoding" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" "github.com/tendermint/tendermint/types" ) @@ -42,8 +42,8 @@ type reactorTestSuite struct { network *p2ptest.Network states map[types.NodeID]*State reactors map[types.NodeID]*Reactor - subs map[types.NodeID]types.Subscription - blocksyncSubs map[types.NodeID]types.Subscription + subs map[types.NodeID]eventbus.Subscription + blocksyncSubs map[types.NodeID]eventbus.Subscription stateChannels map[types.NodeID]*p2p.Channel dataChannels map[types.NodeID]*p2p.Channel voteChannels map[types.NodeID]*p2p.Channel @@ -58,45 +58,76 @@ func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { } } -func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSuite { +func setup( + ctx context.Context, + t *testing.T, + numNodes int, + states []*State, + size int, +) *reactorTestSuite { t.Helper() rts := &reactorTestSuite{ - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: 
p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), states: make(map[types.NodeID]*State), reactors: make(map[types.NodeID]*Reactor, numNodes), - subs: make(map[types.NodeID]types.Subscription, numNodes), - blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), + subs: make(map[types.NodeID]eventbus.Subscription, numNodes), + blocksyncSubs: make(map[types.NodeID]eventbus.Subscription, numNodes), } - rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel, size)) - rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel, size)) - rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel, size)) - rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel, size)) - - _, cancel := context.WithCancel(context.Background()) + rts.stateChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(StateChannel, size)) + rts.dataChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(DataChannel, size)) + rts.voteChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteChannel, size)) + rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteSetBitsChannel, size)) + + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + + chCreator := func(nodeID types.NodeID) p2p.ChannelCreator { + return func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + switch desc.ID { + case StateChannel: + return rts.stateChannels[nodeID], nil + case DataChannel: + return rts.dataChannels[nodeID], nil + case VoteChannel: + return rts.voteChannels[nodeID], nil + case VoteSetBitsChannel: + return rts.voteSetBitsChannels[nodeID], nil + default: + return nil, fmt.Errorf("invalid channel; %v", desc.ID) + } + } + } i := 0 for nodeID, node := range rts.network.Nodes { state := states[i] - reactor := NewReactor( - state.Logger.With("node", nodeID), + reactor, err := NewReactor(ctx, + state.logger.With("node", nodeID), 
state, - rts.stateChannels[nodeID], - rts.dataChannels[nodeID], - rts.voteChannels[nodeID], - rts.voteSetBitsChannels[nodeID], - node.MakePeerUpdates(t), + chCreator(nodeID), + node.MakePeerUpdates(ctx, t), true, + NopMetrics(), ) + require.NoError(t, err) reactor.SetEventBus(state.eventBus) - blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size) + blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + Limit: size, + }) require.NoError(t, err) - fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size) + fsSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryBlockSyncStatus, + Limit: size, + }) require.NoError(t, err) rts.states[nodeID] = state @@ -109,8 +140,9 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu require.NoError(t, state.blockExec.Store().Save(state.state)) } - require.NoError(t, reactor.Start()) + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) + t.Cleanup(reactor.Wait) i++ } @@ -118,18 +150,9 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu require.Len(t, rts.reactors, numNodes) // start the in-memory network and connect all peers with each other - rts.network.Start(t) + rts.network.Start(ctx, t) - t.Cleanup(func() { - for nodeID, r := range rts.reactors { - require.NoError(t, rts.states[nodeID].eventBus.Stop()) - require.NoError(t, r.Stop()) - require.False(t, r.IsRunning()) - } - - leaktest.Check(t) - cancel() - }) + t.Cleanup(leaktest.Check(t)) return rts } @@ -152,32 +175,39 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error { } func waitForAndValidateBlock( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs 
[]types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) for _, tx := range txs { - require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{})) + require.NoError(t, assertMempool(t, states[j].txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{})) } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -185,21 +215,28 @@ func waitForAndValidateBlock( } func waitForAndValidateBlockWithTx( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { ntxs := 0 - BLOCK_TX_LOOP: for { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) // check that txs match the txs we're waiting for. 
@@ -211,18 +248,17 @@ func waitForAndValidateBlockWithTx( } if ntxs == len(txs) { - break BLOCK_TX_LOOP + break } } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -230,22 +266,30 @@ func waitForAndValidateBlockWithTx( } func waitForBlockWithUpdatedValsAndValidateIt( + bctx context.Context, t *testing.T, n int, updatedVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, css []*State, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { var newBlock *types.Block - LOOP: for { - msg := <-blocksSubs[j].Out() + msg, err := blocksSubs[j].Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + newBlock = msg.Data().(types.EventDataNewBlock).Block if newBlock.LastCommit.Size() == len(updatedVals) { - break LOOP + break } } @@ -253,12 +297,11 @@ func waitForBlockWithUpdatedValsAndValidateIt( } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } @@ -275,19 +318,22 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he } func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) n := 4 - states, cleanup := randConsensusState(t, + states, cleanup := makeConsensusState(ctx, t, cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -295,9 +341,12 @@ func TestReactorBasic(t *testing.T) { wg.Add(1) // wait till 
everyone makes the first new block - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - <-s.Out() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -307,9 +356,13 @@ func TestReactorBasic(t *testing.T) { wg.Add(1) // wait till everyone makes the consensus switch - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - msg := <-s.Out() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } ensureBlockSyncStatus(t, msg, true, 0) }(sub) } @@ -318,6 +371,9 @@ func TestReactorBasic(t *testing.T) { } func TestReactorWithEvidence(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) n := 4 @@ -325,7 +381,8 @@ func TestReactorWithEvidence(t *testing.T) { tickerFunc := newMockTickerFunc(true) appFunc := newKVStore - genDoc, privVals := factory.RandGenesisDoc(cfg, n, false, 30) + valSet, privVals := factory.ValidatorSet(ctx, t, n, 30) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) states := make([]*State, n) logger := consensusLogger() @@ -334,12 +391,13 @@ func TestReactorWithEvidence(t *testing.T) { stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() + ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + app := appFunc(t, logger) vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) @@ -348,12 +406,17 @@ func TestReactorWithEvidence(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := 
new(tmsync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + mtx := new(sync.Mutex) + proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app) + + mempool := mempool.NewTxMempool( + log.TestingLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + 0, + ) - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -362,7 +425,8 @@ func TestReactorWithEvidence(t *testing.T) { // everyone includes evidence of another double signing vIdx := (i + 1) % n - ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID()) + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 1, defaultTestTime, privVals[vIdx], cfg.ChainID()) + require.NoError(t, err) evpool := &statemocks.EvidencePool{} evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ @@ -372,27 +436,24 @@ func TestReactorWithEvidence(t *testing.T) { evpool2 := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) - cs.SetPrivValidator(pv) + cs := NewState(ctx, logger.With("validator", i, "module", "consensus"), + thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) + cs.SetPrivValidator(ctx, pv) - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() - require.NoError(t, err) + eventBus := 
eventbus.NewDefault(log.TestingLogger().With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger.With("validator", i, "module", "consensus")) states[i] = cs } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -401,12 +462,16 @@ func TestReactorWithEvidence(t *testing.T) { // We expect for each validator that is the proposer to propose one piece of // evidence. - go func(s types.Subscription) { - msg := <-s.Out() - block := msg.Data().(types.EventDataNewBlock).Block + go func(s eventbus.Subscription) { + defer wg.Done() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } + block := msg.Data().(types.EventDataNewBlock).Block require.Len(t, block.Evidence.Evidence, 1) - wg.Done() }(sub) } @@ -414,10 +479,13 @@ func TestReactorWithEvidence(t *testing.T) { } func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) n := 4 - states, cleanup := randConsensusState( + states, cleanup := makeConsensusState(ctx, t, cfg, n, @@ -431,18 +499,18 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // send a tx require.NoError( t, - assertMempool(states[3].txNotifier).CheckTx( - 
context.Background(), + assertMempool(t, states[3].txNotifier).CheckTx( + ctx, []byte{1, 2, 3}, nil, mempool.TxInfo{}, @@ -454,9 +522,12 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -464,19 +535,22 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } func TestReactorRecordsVotesAndBlockParts(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) n := 4 - states, cleanup := randConsensusState(t, + states, cleanup := makeConsensusState(ctx, t, cfg, n, "consensus_reactor_test", newMockTickerFunc(true), newKVStore) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -484,9 +558,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -523,10 +600,13 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorVotingPowerChange(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) n := 4 - states, cleanup := randConsensusState( + states, cleanup := makeConsensusState(ctx, t, cfg, n, @@ -537,17 +617,17 @@ func TestReactorVotingPowerChange(t 
*testing.T) { t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < n; i++ { - pubKey, err := states[i].privValidator.GetPubKey(context.Background()) + pubKey, err := states[i].privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() @@ -559,20 +639,23 @@ func TestReactorVotingPowerChange(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } wg.Wait() - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } - val1PubKey, err := states[0].privValidator.GetPubKey(context.Background()) + val1PubKey, err := states[0].privValidator.GetPubKey(ctx) require.NoError(t, err) val1PubKeyABCI, err := encoding.PubKeyToProto(val1PubKey) @@ -581,10 +664,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) previousTotalVotingPower := states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, 
activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, previousTotalVotingPower, states[0].GetRoundState().LastValidators.TotalVotingPower(), @@ -596,10 +679,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) previousTotalVotingPower = states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, states[0].GetRoundState().LastValidators.TotalVotingPower(), previousTotalVotingPower, @@ -610,10 +693,10 @@ func TestReactorVotingPowerChange(t *testing.T) { updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) previousTotalVotingPower = states[0].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlockWithTx(t, n, activeVals, blocksSubs, states, updateValidatorTx) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) - waitForAndValidateBlock(t, n, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlockWithTx(ctx, t, n, activeVals, blocksSubs, states, updateValidatorTx) + waitForAndValidateBlock(ctx, t, n, 
activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, n, activeVals, blocksSubs, states) require.NotEqualf( t, previousTotalVotingPower, states[0].GetRoundState().LastValidators.TotalVotingPower(), @@ -624,11 +707,16 @@ func TestReactorVotingPowerChange(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) nPeers := 7 nVals := 4 states, _, _, cleanup := randConsensusNetWithPeers( + ctx, + t, cfg, nVals, nPeers, @@ -638,17 +726,17 @@ func TestReactorValidatorSetChanges(t *testing.T) { ) t.Cleanup(cleanup) - rts := setup(t, nPeers, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nPeers, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - pubKey, err := states[i].privValidator.GetPubKey(context.Background()) + pubKey, err := states[i].privValidator.GetPubKey(ctx) require.NoError(t, err) activeVals[string(pubKey.Address())] = struct{}{} @@ -659,15 +747,18 @@ func TestReactorValidatorSetChanges(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } wg.Wait() - newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) + newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) @@ -675,7 +766,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx1 := 
kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } @@ -683,24 +774,24 @@ func TestReactorValidatorSetChanges(t *testing.T) { // wait till everyone makes block 2 // ensure the commit includes all validators // send newValTx to change vals in block 3 - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) // wait till everyone makes block 3. // it includes the commit for block 2, which is by the original validator set - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx1) // wait till everyone makes block 4. // it includes the commit for block 3, which is by the original validator set - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) // the commits for block 4 should be with the updated validator set activeVals[string(newValidatorPubKey1.Address())] = struct{}{} // wait till everyone makes block 5 // it includes the commit for block 4, which should have the updated validator set - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) - updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background()) + updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) @@ -709,10 +800,10 @@ func TestReactorValidatorSetChanges(t *testing.T) { updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) 
previousTotalVotingPower := states[nVals].GetRoundState().LastValidators.TotalVotingPower() - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, updateValidatorTx1) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) require.NotEqualf( t, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), previousTotalVotingPower, @@ -720,7 +811,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { previousTotalVotingPower, states[nVals].GetRoundState().LastValidators.TotalVotingPower(), ) - newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background()) + newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) @@ -728,7 +819,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background()) + newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) @@ -736,24 +827,24 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) - 
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, newValidatorTx2, newValidatorTx3) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) activeVals[string(newValidatorPubKey2.Address())] = struct{}{} activeVals[string(newValidatorPubKey3.Address())] = struct{}{} - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, removeValidatorTx2, removeValidatorTx3) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) delete(activeVals, string(newValidatorPubKey2.Address())) delete(activeVals, string(newValidatorPubKey3.Address())) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, activeVals, blocksSubs, states) } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 60ad0c0414..6250ffc062 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -3,6 +3,7 @@ 
package consensus import ( "bytes" "context" + "errors" "fmt" "hash/crc32" "io" @@ -11,6 +12,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -36,7 +38,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli) // Unmarshal and apply a single message to the consensus state as if it were // received in receiveRoutine. Lines that start with "#" are ignored. // NOTE: receiveRoutine should not be running. -func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { +func (cs *State) readReplayMessage(ctx context.Context, msg *TimedWALMessage, newStepSub eventbus.Subscription) error { // Skip meta messages which exist for demarcating boundaries. if _, ok := msg.Msg.(EndHeightMessage); ok { return nil @@ -45,20 +47,20 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr // for logging switch m := msg.Msg.(type) { case types.EventDataRoundState: - cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) + cs.logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) // these are playback checks - ticker := time.After(time.Second * 2) if newStepSub != nil { - select { - case stepMsg := <-newStepSub.Out(): - m2 := stepMsg.Data().(types.EventDataRoundState) - if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { - return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) - } - case <-newStepSub.Canceled(): - return fmt.Errorf("failed to read off newStepSub.Out(). 
newStepSub was canceled") - case <-ticker: - return fmt.Errorf("failed to read off newStepSub.Out()") + ctxto, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + stepMsg, err := newStepSub.Next(ctxto) + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("subscription timed out: %w", err) + } else if err != nil { + return fmt.Errorf("subscription canceled: %w", err) + } + m2 := stepMsg.Data().(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) } } case msgInfo: @@ -69,20 +71,20 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr switch msg := m.Msg.(type) { case *ProposalMessage: p := msg.Proposal - cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", + cs.logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID) case *BlockPartMessage: - cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) + cs.logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) case *VoteMessage: v := msg.Vote - cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, + cs.logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, "blockID", v.BlockID, "peer", peerID) } - cs.handleMsg(m) + cs.handleMsg(ctx, m) case timeoutInfo: - cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) - cs.handleTimeout(m, cs.RoundState) + cs.logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) + cs.handleTimeout(ctx, m, cs.RoundState) default: return fmt.Errorf("replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg)) } @@ -91,7 +93,7 @@ func (cs *State) readReplayMessage(msg 
*TimedWALMessage, newStepSub types.Subscr // Replay only those messages since the last block. `timeoutRoutine` should // run concurrently to read off tickChan. -func (cs *State) catchupReplay(csHeight int64) error { +func (cs *State) catchupReplay(ctx context.Context, csHeight int64) error { // Set replayMode to true so we don't log signing errors. cs.replayMode = true @@ -128,7 +130,7 @@ func (cs *State) catchupReplay(csHeight int64) error { } gr, found, err = cs.wal.SearchForEndHeight(endHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) if err == io.EOF { - cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) + cs.logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) } else if err != nil { return err } @@ -137,7 +139,7 @@ func (cs *State) catchupReplay(csHeight int64) error { } defer gr.Close() - cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) + cs.logger.Info("Catchup by replaying consensus messages", "height", csHeight) var msg *TimedWALMessage dec := WALDecoder{gr} @@ -149,7 +151,7 @@ LOOP: case err == io.EOF: break LOOP case IsDataCorruptionError(err): - cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) + cs.logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) return err case err != nil: return err @@ -158,11 +160,11 @@ LOOP: // NOTE: since the priv key is set when the msgs are received // it will attempt to eg double sign but we can just ignore it // since the votes will be replayed and we'll get to the next step - if err := cs.readReplayMessage(msg, nil); err != nil { + if err := cs.readReplayMessage(ctx, msg, nil); err != nil { return err } } - cs.Logger.Info("Replay: Done") + cs.logger.Info("Replay: Done") return nil } @@ -209,42 +211,38 @@ type Handshaker struct { nBlocks int // number of blocks applied to the state } -func NewHandshaker(stateStore 
sm.Store, state sm.State, - store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { +func NewHandshaker( + logger log.Logger, + stateStore sm.Store, + state sm.State, + store sm.BlockStore, + eventBus types.BlockEventPublisher, + genDoc *types.GenesisDoc, +) *Handshaker { return &Handshaker{ stateStore: stateStore, initialState: state, store: store, - eventBus: types.NopEventBus{}, + eventBus: eventBus, genDoc: genDoc, - logger: log.NewNopLogger(), + logger: logger, nBlocks: 0, } } -func (h *Handshaker) SetLogger(l log.Logger) { - h.logger = l -} - -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { - h.eventBus = eventBus -} - // NBlocks returns the number of blocks applied to the state. func (h *Handshaker) NBlocks() int { return h.nBlocks } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { +func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. - res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo) + res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo) if err != nil { - return fmt.Errorf("error calling Info: %v", err) + return fmt.Errorf("error calling Info: %w", err) } blockHeight := res.LastBlockHeight @@ -266,9 +264,9 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay blocks up to the latest in the blockstore. 
- _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + _, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { - return fmt.Errorf("error on replay: %v", err) + return fmt.Errorf("error on replay: %w", err) } h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", @@ -283,6 +281,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // matches the current state. // Returns the final AppHash or an error. func (h *Handshaker) ReplayBlocks( + ctx context.Context, state sm.State, appHash []byte, appBlockHeight int64, @@ -317,7 +316,7 @@ func (h *Handshaker) ReplayBlocks( Validators: nextVals, AppStateBytes: h.genDoc.AppState, } - res, err := proxyApp.Consensus().InitChainSync(context.Background(), req) + res, err := proxyApp.Consensus().InitChain(ctx, req) if err != nil { return nil, err } @@ -391,7 +390,7 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! 
@@ -406,7 +405,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), @@ -414,7 +413,7 @@ func (h *Handshaker) ReplayBlocks( // NOTE: We could instead use the cs.WAL on cs.Start, // but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT h.logger.Info("Replay last block using real app") - state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus()) return state.AppHash, err case appBlockHeight == storeBlockHeight: @@ -423,9 +422,17 @@ func (h *Handshaker) ReplayBlocks( if err != nil { return nil, err } - mockApp := newMockProxyApp(appHash, abciResponses) + mockApp, err := newMockProxyApp(ctx, h.logger, appHash, abciResponses) + if err != nil { + return nil, err + } + h.logger.Info("Replay last block using mock app") - state, err = h.replayBlock(state, storeBlockHeight, mockApp) + state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp) + if err != nil { + return nil, err + } + return state.AppHash, err } @@ -436,6 +443,7 @@ func (h *Handshaker) ReplayBlocks( } func (h *Handshaker) replayBlocks( + ctx context.Context, state sm.State, proxyApp proxy.AppConns, appBlockHeight, @@ -475,13 +483,13 @@ func (h *Handshaker) replayBlocks( blockExec := sm.NewBlockExecutor( h.stateStore, h.logger, proxyApp.Consensus(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store) blockExec.SetEventBus(h.eventBus) - appHash, err = sm.ExecCommitBlock( + appHash, err = sm.ExecCommitBlock(ctx, blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, 
h.genDoc.InitialHeight, state) if err != nil { return nil, err } } else { - appHash, err = sm.ExecCommitBlock( + appHash, err = sm.ExecCommitBlock(ctx, nil, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) if err != nil { return nil, err @@ -493,7 +501,7 @@ func (h *Handshaker) replayBlocks( if mutateState { // sync the final block - state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + state, err = h.replayBlock(ctx, state, storeBlockHeight, proxyApp.Consensus()) if err != nil { return nil, err } @@ -505,7 +513,12 @@ func (h *Handshaker) replayBlocks( } // ApplyBlock on the proxyApp with the last block. -func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { +func (h *Handshaker) replayBlock( + ctx context.Context, + state sm.State, + height int64, + proxyApp proxy.AppConnConsensus, +) (sm.State, error) { block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) @@ -515,7 +528,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap blockExec.SetEventBus(h.eventBus) var err error - state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + state, err = blockExec.ApplyBlock(ctx, state, meta.BlockID, block) if err != nil { return sm.State{}, err } diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index f60dff531c..e2d855b7c3 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -13,12 +13,12 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" 
- tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/types" ) @@ -31,16 +31,27 @@ const ( // replay messages interactively or all at once // replay the wal file -func RunReplayFile(cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool) { - consensusState := newConsensusStateForReplay(cfg, csConfig) +func RunReplayFile( + ctx context.Context, + logger log.Logger, + cfg config.BaseConfig, + csConfig *config.ConsensusConfig, + console bool, +) error { + consensusState, err := newConsensusStateForReplay(ctx, cfg, logger, csConfig) + if err != nil { + return err + } - if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { - tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) + if err := consensusState.ReplayFile(ctx, csConfig.WalFile(), console); err != nil { + return fmt.Errorf("consensus replay: %w", err) } + + return nil } // Replay msgs in file or start the console -func (cs *State) ReplayFile(file string, console bool) error { +func (cs *State) ReplayFile(ctx context.Context, file string, console bool) error { if cs.IsRunning() { return errors.New("cs is already running, cannot replay") @@ -53,15 +64,17 @@ func (cs *State) ReplayFile(file string, console bool) error { // ensure all new step events are regenerated as expected - ctx := context.Background() - newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } defer func() { args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} if err := cs.eventBus.Unsubscribe(ctx, args); err != nil { - cs.Logger.Error("Error unsubscribing to event bus", "err", err) + cs.logger.Error("error unsubscribing to event 
bus", "err", err) } }() @@ -78,7 +91,10 @@ func (cs *State) ReplayFile(file string, console bool) error { var msg *TimedWALMessage for { if nextN == 0 && console { - nextN = pb.replayConsoleLoop() + nextN, err = pb.replayConsoleLoop(ctx) + if err != nil { + return err + } } msg, err = pb.dec.Decode() @@ -88,7 +104,7 @@ func (cs *State) ReplayFile(file string, console bool) error { return err } - if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + if err := pb.cs.readReplayMessage(ctx, msg, newStepSub); err != nil { return err } @@ -125,13 +141,13 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl } // go back count steps by resetting the state and running (pb.count - count) steps -func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { +func (pb *playback) replayReset(ctx context.Context, count int, newStepSub eventbus.Subscription) error { if err := pb.cs.Stop(); err != nil { return err } pb.cs.Wait() - newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, + newCS := NewState(ctx, pb.cs.logger, pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool) newCS.SetEventBus(pb.cs.eventBus) newCS.startForReplay() @@ -157,7 +173,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error } else if err != nil { return err } - if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + if err := pb.cs.readReplayMessage(ctx, msg, newStepSub); err != nil { return err } pb.count++ @@ -166,30 +182,20 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error } func (cs *State) startForReplay() { - cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests") - /* TODO:! 
- // since we replay tocks we just ignore ticks - go func() { - for { - select { - case <-cs.tickChan: - case <-cs.Quit: - return - } - } - }()*/ + cs.logger.Error("Replay commands are disabled until someone updates them and writes tests") } -// console function for parsing input and running commands -func (pb *playback) replayConsoleLoop() int { +// console function for parsing input and running commands. The integer +// return value is invalid unless the error is nil. +func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) line, more, err := bufReader.ReadLine() if more { - tmos.Exit("input is too long") + return 0, fmt.Errorf("input is too long") } else if err != nil { - tmos.Exit(err.Error()) + return 0, err } tokens := strings.Split(string(line), " ") @@ -203,13 +209,13 @@ func (pb *playback) replayConsoleLoop() int { // "next N" -> replay next N messages if len(tokens) == 1 { - return 0 + return 0, nil } i, err := strconv.Atoi(tokens[1]) if err != nil { fmt.Println("next takes an integer argument") } else { - return i + return i, nil } case "back": @@ -219,23 +225,25 @@ func (pb *playback) replayConsoleLoop() int { // NOTE: "back" is not supported in the state machine design, // so we restart and replay up to - ctx := context.Background() // ensure all new step events are regenerated as expected - newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := pb.cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { - tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + return 0, fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } defer func() { args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} if err := 
pb.cs.eventBus.Unsubscribe(ctx, args); err != nil { - pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err) + pb.cs.logger.Error("error unsubscribing from eventBus", "err", err) } }() if len(tokens) == 1 { - if err := pb.replayReset(1, newStepSub); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) + if err := pb.replayReset(ctx, 1, newStepSub); err != nil { + pb.cs.logger.Error("Replay reset error", "err", err) } } else { i, err := strconv.Atoi(tokens[1]) @@ -243,8 +251,8 @@ func (pb *playback) replayConsoleLoop() int { fmt.Println("back takes an integer argument") } else if i > pb.count { fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) - } else if err := pb.replayReset(i, newStepSub); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) + } else if err := pb.replayReset(ctx, i, newStepSub); err != nil { + pb.cs.logger.Error("Replay reset error", "err", err) } } @@ -286,56 +294,62 @@ func (pb *playback) replayConsoleLoop() int { //-------------------------------------------------------------------------------- // convenience for replay mode -func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.ConsensusConfig) *State { +func newConsensusStateForReplay( + ctx context.Context, + cfg config.BaseConfig, + logger log.Logger, + csConfig *config.ConsensusConfig, +) (*State, error) { dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get State stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + stateStore := sm.NewStore(stateDB) gdoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + state, err := sm.MakeGenesisState(gdoc) if err != nil { - 
tmos.Exit(err.Error()) + return nil, err } // Create proxyAppConn connection (consensus, mempool, query) - clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - err = proxyApp.Start() + clientCreator, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) if err != nil { - tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) + return nil, fmt.Errorf("starting proxy app conns: %w", err) } - eventBus := types.NewEventBus() - if err := eventBus.Start(); err != nil { - tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) + eventBus := eventbus.NewDefault(logger) + if err := eventBus.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start event bus: %w", err) } - handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) - handshaker.SetEventBus(eventBus) - err = handshaker.Handshake(proxyApp) - if err != nil { - tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + handshaker := NewHandshaker(logger, stateStore, state, blockStore, eventBus, gdoc) + + if err = handshaker.Handshake(ctx, proxyApp); err != nil { + return nil, err } mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(csConfig, state.Copy(), blockExec, + consensusState := NewState(ctx, logger, csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) - return consensusState + return consensusState, nil } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 1235baccbf..ec16ed5568 100644 --- 
a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -28,6 +29,7 @@ func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (emptyMempool) Update( + _ context.Context, _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, @@ -36,11 +38,11 @@ func (emptyMempool) Update( ) error { return nil } -func (emptyMempool) Flush() {} -func (emptyMempool) FlushAppConn() error { return nil } -func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) SizeBytes() int64 { return 0 } +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn(ctx context.Context) error { return nil } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) SizeBytes() int64 { return 0 } func (emptyMempool) TxsFront() *clist.CElement { return nil } func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } @@ -54,17 +56,27 @@ func (emptyMempool) CloseWAL() {} // Useful because we don't want to call Commit() twice for the same block on // the real app. 
-func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { +func newMockProxyApp( + ctx context.Context, + logger log.Logger, + appHash []byte, + abciResponses *tmstate.ABCIResponses, +) (proxy.AppConnConsensus, error) { + clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ appHash: appHash, abciResponses: abciResponses, }) - cli, _ := clientCreator() - err := cli.Start() + cli, err := clientCreator(logger) if err != nil { - panic(err) + return nil, err + } + + if err = cli.Start(ctx); err != nil { + return nil, err } - return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()) + + return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()), nil } type mockProxyApp struct { diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 0d0ae36e81..566aa8bff8 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -3,14 +3,13 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" "runtime" - "sort" "testing" "time" @@ -25,8 +24,10 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" @@ -54,58 +55,65 @@ import ( // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. 
-func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config, +func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config, lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { logger := log.TestingLogger() state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) - privValidator := loadPrivValidator(consensusReplayConfig) + privValidator := loadPrivValidator(t, consensusReplayConfig) blockStore := store.NewBlockStore(dbm.NewMemDB()) cs := newStateWithConfigAndBlockStore( + ctx, + t, + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) - bytes, _ := ioutil.ReadFile(cs.config.WalFile()) - t.Logf("====== WAL: \n\r%X\n", bytes) - - err = cs.Start() + bytes, err := os.ReadFile(cs.config.WalFile()) require.NoError(t, err) + require.NotNil(t, bytes) + + require.NoError(t, cs.Start(ctx)) defer func() { if err := cs.Stop(); err != nil { t.Error(err) } }() - + t.Cleanup(cs.Wait) // This is just a signal that we haven't halted; its not something contained // in the WAL itself. Assuming the consensus state is running, replay of any // WAL, including the empty one, should eventually be followed by a new // block, or else something is wrong. 
- newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + newBlockSub, err := cs.eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + }) require.NoError(t, err) - select { - case <-newBlockSub.Out(): - case <-newBlockSub.Canceled(): - t.Fatal("newBlockSub was canceled") - case <-time.After(120 * time.Second): + ctxto, cancel := context.WithTimeout(ctx, 120*time.Second) + defer cancel() + _, err = newBlockSub.Next(ctxto) + if errors.Is(err, context.DeadlineExceeded) { t.Fatal("Timed out waiting for new block (see trace above)") + } else if err != nil { + t.Fatal("newBlockSub was canceled") } } -func sendTxs(ctx context.Context, cs *State) { +func sendTxs(ctx context.Context, t *testing.T, cs *State) { + t.Helper() for i := 0; i < 256; i++ { select { case <-ctx.Done(): return default: tx := []byte{byte(i)} - if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - panic(err) - } + + require.NoError(t, assertMempool(t, cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{})) + i++ } } @@ -113,6 +121,9 @@ func sendTxs(ctx context.Context, cs *State) { // TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 
func TestWALCrash(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testCases := []struct { name string initFn func(dbm.DB, *State, context.Context) @@ -123,7 +134,7 @@ func TestWALCrash(t *testing.T) { 1}, {"many non-empty blocks", func(stateDB dbm.DB, cs *State, ctx context.Context) { - go sendTxs(ctx, cs) + go sendTxs(ctx, t, cs) }, 3}, } @@ -131,13 +142,14 @@ func TestWALCrash(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - consensusReplayConfig := ResetConfig(tc.name) - crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) + consensusReplayConfig, err := ResetConfig(tc.name) + require.NoError(t, err) + crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop) }) } } -func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config, +func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusReplayConfig *config.Config, initFn func(dbm.DB, *State, context.Context), heightToStop int64) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -145,8 +157,6 @@ func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config i := 1 LOOP: for { - t.Logf("====== LOOP %d\n", i) - // create consensus state from a clean slate logger := log.NewNopLogger() blockDB := dbm.NewMemDB() @@ -155,18 +165,20 @@ LOOP: blockStore := store.NewBlockStore(blockDB) state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) - privValidator := loadPrivValidator(consensusReplayConfig) + privValidator := loadPrivValidator(t, consensusReplayConfig) cs := newStateWithConfigAndBlockStore( + rctx, + t, + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) // start sending transactions - ctx, cancel := context.WithCancel(context.Background()) + 
ctx, cancel := context.WithCancel(rctx) initFn(stateDB, cs, ctx) // clean up WAL file from the previous iteration @@ -174,7 +186,7 @@ LOOP: os.Remove(walFile) // set crashing WAL - csWal, err := cs.OpenWAL(walFile) + csWal, err := cs.OpenWAL(ctx, walFile) require.NoError(t, err) crashingWal.next = csWal @@ -183,17 +195,17 @@ LOOP: cs.wal = crashingWal // start consensus state - err = cs.Start() + err = cs.Start(ctx) require.NoError(t, err) i++ select { + case <-rctx.Done(): + t.Fatal("context canceled before test completed") case err := <-walPanicked: - t.Logf("WAL panicked: %v", err) - // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) + startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore) // stop consensus state and transactions sender (initFn) cs.Stop() //nolint:errcheck // Logging this error causes failure @@ -279,9 +291,9 @@ func (w *crashingWAL) SearchForEndHeight( return w.next.SearchForEndHeight(height, options) } -func (w *crashingWAL) Start() error { return w.next.Start() } -func (w *crashingWAL) Stop() error { return w.next.Stop() } -func (w *crashingWAL) Wait() { w.next.Wait() } +func (w *crashingWAL) Start(ctx context.Context) error { return w.next.Start(ctx) } +func (w *crashingWAL) Stop() error { return w.next.Stop() } +func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ type simulatorTestSuite struct { @@ -309,7 +321,7 @@ const ( var modes = []uint{0, 1, 2, 3} // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay -func setupSimulator(t *testing.T) *simulatorTestSuite { +func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { t.Helper() cfg := configSetup(t) @@ -322,6 +334,8 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { nVals := 4 css, genDoc, cfg, cleanup := 
randConsensusNetWithPeers( + ctx, + t, cfg, nVals, nPeers, @@ -329,13 +343,16 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { newMockTickerFunc(true), newPersistentKVStoreWithPath) sim.Config = cfg - sim.GenesisState, _ = sm.MakeGenesisState(genDoc) + + var err error + sim.GenesisState, err = sm.MakeGenesisState(genDoc) + require.NoError(t, err) sim.CleanupFunc = cleanup partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) - proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryCompleteProposal) vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { @@ -344,208 +361,225 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height, round := css[0].Height, css[0].Round // start the machine - startTestRound(css[0], height, round) + startTestRound(ctx, css[0], height, round) incrementHeight(vss...) - ensureNewRound(newRoundCh, height, 0) - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, 0) + ensureNewProposal(t, proposalCh, height, round) rs := css[0].GetRoundState() - signAddVotes(sim.Config, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), + signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 2 height++ incrementHeight(vss...) 
- newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1) require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempool.TxInfo{}) - assert.Nil(t, err) - propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts := propBlock.MakePartSet(partSize) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{}) + assert.NoError(t, err) + propBlock, _, err := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) + require.NoError(t, err) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vss[1].Height, round, -1, blockID) + proposal := types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time) p := proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(sim.Config, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), + signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(), + 
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 3 height++ incrementHeight(vss...) - updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background()) + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(ctx) require.NoError(t, err) updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1) require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempool.TxInfo{}) - assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{}) + assert.NoError(t, err) + propBlock, _, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) + require.NoError(t, err) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, round, -1, blockID) + proposal = types.NewProposal(vss[2].Height, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() - if err := vss[2].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[2].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, 
proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(sim.Config, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), + signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 4 height++ incrementHeight(vss...) - newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background()) + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2) require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempool.TxInfo{}) - assert.Nil(t, err) - newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background()) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx2, nil, mempool.TxInfo{}) + assert.NoError(t, err) + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(ctx) require.NoError(t, err) newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3) require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempool.TxInfo{}) - assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{}) + assert.NoError(t, err) + propBlock, _, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) + require.NoError(t, err) + propBlockParts, err = propBlock.MakePartSet(partSize) + 
require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} newVss := make([]*validatorStub, nVals+1) copy(newVss, vss[:nVals+1]) - sort.Sort(ValidatorStubsByPower(newVss)) + newVss = sortVValidatorStubsByPower(ctx, t, newVss) valIndexFn := func(cssIdx int) int { for i, vs := range newVss { - vsPubKey, err := vs.GetPubKey(context.Background()) + vsPubKey, err := vs.GetPubKey(ctx) require.NoError(t, err) - cssPubKey, err := css[cssIdx].privValidator.GetPubKey(context.Background()) + cssPubKey, err := css[cssIdx].privValidator.GetPubKey(ctx) require.NoError(t, err) if vsPubKey.Equals(cssPubKey) { return i } } - panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx)) + t.Fatalf("validator css[%d] not found in newVss", cssIdx) + return -1 } selfIndex := valIndexFn(0) + require.NotEqual(t, -1, selfIndex) - proposal = types.NewProposal(vss[3].Height, round, -1, blockID) + proposal = types.NewProposal(vss[3].Height, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() - if err := vss[3].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[3].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempool.TxInfo{}) - assert.Nil(t, err) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{}) + assert.NoError(t, err) rs = css[0].GetRoundState() for i := 
0; i < nVals+1; i++ { if i == selfIndex { continue } - signAddVotes(sim.Config, css[0], - tmproto.PrecommitType, rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), newVss[i]) + signAddVotes(ctx, t, css[0], + tmproto.PrecommitType, sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, + newVss[i]) } - - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 5 height++ incrementHeight(vss...) // Reflect the changes to vss[nVals] at height 3 and resort newVss. newVssIdx := valIndexFn(nVals) + require.NotEqual(t, -1, newVssIdx) + newVss[newVssIdx].VotingPower = 25 - sort.Sort(ValidatorStubsByPower(newVss)) + newVss = sortVValidatorStubsByPower(ctx, t, newVss) + selfIndex = valIndexFn(0) - ensureNewProposal(proposalCh, height, round) + require.NotEqual(t, -1, selfIndex) + ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() for i := 0; i < nVals+1; i++ { if i == selfIndex { continue } - signAddVotes(sim.Config, css[0], - tmproto.PrecommitType, rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), newVss[i]) + signAddVotes(ctx, t, css[0], + tmproto.PrecommitType, sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, + newVss[i]) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 6 height++ incrementHeight(vss...) 
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempool.TxInfo{}) - assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{}) + assert.NoError(t, err) + propBlock, _, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) + require.NoError(t, err) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} newVss = make([]*validatorStub, nVals+3) copy(newVss, vss[:nVals+3]) - sort.Sort(ValidatorStubsByPower(newVss)) + newVss = sortVValidatorStubsByPower(ctx, t, newVss) selfIndex = valIndexFn(0) - proposal = types.NewProposal(vss[1].Height, round, -1, blockID) + require.NotEqual(t, -1, selfIndex) + proposal = types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { + if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() for i := 0; i < nVals+3; i++ { if i == selfIndex { continue } - signAddVotes(sim.Config, css[0], - tmproto.PrecommitType, rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), newVss[i]) + signAddVotes(ctx, t, css[0], + tmproto.PrecommitType, 
sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, + newVss[i]) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) sim.Chain = make([]*types.Block, 0) sim.Commits = make([]*types.Commit, 0) @@ -562,55 +596,70 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // Sync from scratch func TestHandshakeReplayAll(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, false) + testHandshakeReplay(ctx, t, sim, 0, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, true) + testHandshakeReplay(ctx, t, sim, 0, m, true) } } // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, false) + testHandshakeReplay(ctx, t, sim, 2, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, true) + testHandshakeReplay(ctx, t, sim, 2, m, true) } } // Sync from lagging by one func TestHandshakeReplayOne(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks-1, m, false) + testHandshakeReplay(ctx, t, sim, numBlocks-1, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks-1, m, true) + testHandshakeReplay(ctx, t, sim, numBlocks-1, m, true) } } // Sync from caught up func TestHandshakeReplayNone(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - 
testHandshakeReplay(t, sim, numBlocks, m, false) + testHandshakeReplay(ctx, t, sim, numBlocks, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks, m, true) + testHandshakeReplay(ctx, t, sim, numBlocks, m, true) } } // Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx func TestMockProxyApp(t *testing.T) { - sim := setupSimulator(t) // setup config and simulator + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) // setup config and simulator cfg := sim.Config assert.NotNil(t, cfg) @@ -632,65 +681,70 @@ func TestMockProxyApp(t *testing.T) { err = proto.Unmarshal(bytes, loadedAbciRes) require.NoError(t, err) - mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes) + mock, err := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes) + require.NoError(t, err) abciRes := new(tmstate.ABCIResponses) abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs)) - // Execute transactions and get hash. - proxyCb := func(req *abci.Request, res *abci.Response) { - if r, ok := res.Value.(*abci.Response_DeliverTx); ok { - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - txRes := r.DeliverTx - if txRes.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log) - invalidTxs++ - } - abciRes.DeliverTxs[txIndex] = txRes - txIndex++ - } - } - mock.SetResponseCallback(proxyCb) someTx := []byte("tx") - _, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx}) + resp, err := mock.DeliverTx(ctx, abci.RequestDeliverTx{Tx: someTx}) + // TODO: make use of res.Log + // TODO: make use of this info + // Blocks may include invalid txs. 
+ if resp.Code == abci.CodeTypeOK { + validTxs++ + } else { + invalidTxs++ + } + abciRes.DeliverTxs[txIndex] = resp + txIndex++ + assert.NoError(t, err) }) assert.True(t, validTxs == 1) assert.True(t, invalidTxs == 0) } -func tempWALWithData(data []byte) string { - walFile, err := ioutil.TempFile("", "wal") - if err != nil { - panic(fmt.Sprintf("failed to create temp WAL file: %v", err)) - } +func tempWALWithData(t *testing.T, data []byte) string { + t.Helper() + + walFile, err := os.CreateTemp("", "wal") + require.NoError(t, err, "failed to create temp WAL file") + _, err = walFile.Write(data) - if err != nil { - panic(fmt.Sprintf("failed to write to temp WAL file: %v", err)) - } - if err := walFile.Close(); err != nil { - panic(fmt.Sprintf("failed to close temp WAL file: %v", err)) - } + require.NoError(t, err, "failed to write to temp WAL file") + + require.NoError(t, walFile.Close(), "failed to close temp WAL file") + return walFile.Name() } // Make some blocks. Start a fresh app and apply nBlocks blocks. 
// Then restart the app and sync it up with the remaining blocks -func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mode uint, testValidatorsChange bool) { +func testHandshakeReplay( + rctx context.Context, + t *testing.T, + sim *simulatorTestSuite, + nBlocks int, + mode uint, + testValidatorsChange bool, +) { var chain []*types.Block var commits []*types.Commit var store *mockBlockStore var stateDB dbm.DB var genesisState sm.State + ctx, cancel := context.WithCancel(rctx) + t.Cleanup(cancel) + cfg := sim.Config + logger := log.TestingLogger() if testValidatorsChange { - testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() stateDB = dbm.NewMemDB() @@ -698,33 +752,28 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod cfg = sim.Config chain = append([]*types.Block{}, sim.Chain...) 
// copy chain commits = sim.Commits - store = newMockBlockStore(cfg, genesisState.ConsensusParams) + store = newMockBlockStore(t, cfg, genesisState.ConsensusParams) } else { // test single node - testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() - walBody, err := WALWithNBlocks(t, numBlocks) + walBody, err := WALWithNBlocks(ctx, t, logger, numBlocks) require.NoError(t, err) - walFile := tempWALWithData(walBody) + walFile := tempWALWithData(t, walBody) cfg.Consensus.SetWalFile(walFile) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) - wal, err := NewWAL(walFile) + wal, err := NewWAL(ctx, logger, walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - }) - chain, commits, err = makeBlockchainFromWAL(wal) - require.NoError(t, err) - pubKey, err := privVal.GetPubKey(context.Background()) + t.Cleanup(func() { cancel(); wal.Wait() }) + chain, commits = makeBlockchainFromWAL(t, wal) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion) + stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion) } stateStore := sm.NewStore(stateDB) @@ -733,11 +782,24 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(cfg, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store) + state = buildTMStateFromChain( + ctx, + t, + cfg, + logger, + sim.Mempool, + sim.Evpool, + stateStore, + state, + 
chain, + nBlocks, + mode, + store, + ) latestAppHash := state.AppHash // make a new client creator - kvstoreApp := kvstore.NewPersistentKVStoreApplication( + kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) }) @@ -745,12 +807,12 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod if nBlocks > 0 { // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) + proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) stateDB1 := dbm.NewMemDB() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) require.NoError(t, err) - buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) + buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store) } // Prune block store if requested @@ -763,29 +825,23 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) - handshaker := NewHandshaker(stateStore, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) + handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") - t.Cleanup(func() { - if err := proxyApp.Stop(); err != 
nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) - err := handshaker.Handshake(proxyApp) + err = handshaker.Handshake(ctx, proxyApp) if expectError { require.Error(t, err) return - } else if err != nil { - t.Fatalf("Error on abci handshake: %v", err) } + require.NoError(t, err, "Error on abci handshake") // get the latest app hash from the app - res, err := proxyApp.Query().InfoSync(context.Background(), abci.RequestInfo{Version: ""}) + res, err := proxyApp.Query().Info(ctx, abci.RequestInfo{Version: ""}) if err != nil { t.Fatal(err) } @@ -810,25 +866,31 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod } } -func applyBlock(stateStore sm.Store, +func applyBlock( + ctx context.Context, + t *testing.T, + stateStore sm.Store, mempool mempool.Mempool, evpool sm.EvidencePool, st sm.State, blk *types.Block, proxyApp proxy.AppConns, - blockStore *mockBlockStore) sm.State { + blockStore *mockBlockStore, +) sm.State { testPartSize := types.BlockPartSizeBytes blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} - newState, err := blockExec.ApplyBlock(st, blkID, blk) - if err != nil { - panic(err) - } + bps, err := blk.MakePartSet(testPartSize) + require.NoError(t, err) + blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()} + newState, err := blockExec.ApplyBlock(ctx, st, blkID, blk) + require.NoError(t, err) return newState } func buildAppStateFromChain( + ctx context.Context, + t *testing.T, proxyApp proxy.AppConns, stateStore sm.Store, mempool mempool.Mempool, @@ -837,48 +899,49 @@ func buildAppStateFromChain( chain []*types.Block, nBlocks int, mode uint, - blockStore *mockBlockStore) { + blockStore *mockBlockStore, +) { + t.Helper() // start a new app without handshake, play nBlocks blocks - if err := proxyApp.Start(); err != nil { 
- panic(err) - } - defer proxyApp.Stop() //nolint:errcheck // ignore + require.NoError(t, proxyApp.Start(ctx)) state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + _, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{ Validators: validators, - }); err != nil { - panic(err) - } - if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo - panic(err) - } + }) + require.NoError(t, err) + + require.NoError(t, stateStore.Save(state)) // save height 1's validatorsInfo + switch mode { case 0: for i := 0; i < nBlocks; i++ { block := chain[i] - state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) } case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] - state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) } if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not - state = applyBlock(stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore) } default: - panic(fmt.Sprintf("unknown mode %v", mode)) + require.Fail(t, "unknown mode %v", mode) } } func buildTMStateFromChain( + ctx context.Context, + t *testing.T, cfg *config.Config, + logger log.Logger, mempool mempool.Mempool, evpool sm.EvidencePool, stateStore sm.Store, @@ -886,48 +949,47 @@ func buildTMStateFromChain( chain []*types.Block, nBlocks int, mode uint, - blockStore *mockBlockStore) sm.State { + blockStore *mockBlockStore, +) sm.State { + t.Helper() + // run the 
whole chain against this client to build up the tendermint state - kvstoreApp := kvstore.NewPersistentKVStoreApplication( + kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) defer kvstoreApp.Close() clientCreator := abciclient.NewLocalCreator(kvstoreApp) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { - panic(err) - } - defer proxyApp.Stop() //nolint:errcheck + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx)) state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + _, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{ Validators: validators, - }); err != nil { - panic(err) - } - if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo - panic(err) - } + }) + require.NoError(t, err) + + require.NoError(t, stateStore.Save(state)) + switch mode { case 0: // sync right up for _, block := range chain { - state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) } case 1, 2, 3: // sync up to the penultimate as if we stored the block. 
// whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { - state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore) } // apply the final block to a state copy so we can // get the right next appHash but keep the state back - applyBlock(stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore) + applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore) default: - panic(fmt.Sprintf("unknown mode %v", mode)) + require.Fail(t, "unknown mode %v", mode) } return state @@ -938,21 +1000,30 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - 0x01 // - 0x02 // - 0x03 - cfg := ResetConfig("handshake_test_") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := ResetConfig("handshake_test_") + require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) const appVersion = 0x0 - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(cfg, pubKey, appVersion) + stateDB, state, store := stateAndStore(t, cfg, pubKey, appVersion) stateStore := sm.NewStore(stateDB) - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks - blocks := sf.MakeBlocks(3, &state, privVal) + blocks := sf.MakeBlocks(ctx, t, 3, &state, privVal) + store.chain = blocks + logger := log.TestingLogger() + // 2. 
Tendermint must panic if app returns wrong hash for the first block // - RANDOM HASH // - 0x02 @@ -960,18 +1031,14 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { { app := &badApp{numBlocks: 3, allHashesAreWrong: true} clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - err := proxyApp.Start() + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { - h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } }) @@ -984,18 +1051,14 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { { app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} clientCreator := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - err := proxyApp.Start() + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) assert.Panics(t, func() { - h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + h := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + if err = h.Handshake(ctx, proxyApp); err != nil { t.Log(err) } }) @@ -1027,17 +1090,14 @@ func (app *badApp) Commit() abci.ResponseCommit { //-------------------------- // utils for making blocks -func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { +func 
makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Commit) { + t.Helper() var height int64 // Search for height marker gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{}) - if err != nil { - return nil, nil, err - } - if !found { - return nil, nil, fmt.Errorf("wal does not contain height %d", height) - } + require.NoError(t, err) + require.True(t, found, "wal does not contain height %d", height) defer gr.Close() // log.Notice("Build a blockchain by reading from the WAL") @@ -1054,9 +1114,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { msg, err := dec.Decode() if err == io.EOF { break - } else if err != nil { - return nil, nil, err } + require.NoError(t, err) piece := readPieceFromWAL(msg) if piece == nil { @@ -1068,26 +1127,21 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // if its not the first one, we have a full block if thisBlockParts != nil { var pbb = new(tmproto.Block) - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) - if err != nil { - panic(err) - } - err = proto.Unmarshal(bz, pbb) - if err != nil { - panic(err) - } + bz, err := io.ReadAll(thisBlockParts.GetReader()) + require.NoError(t, err) + + require.NoError(t, proto.Unmarshal(bz, pbb)) + block, err := types.BlockFromProto(pbb) - if err != nil { - panic(err) - } + require.NoError(t, err) + + require.Equal(t, block.Height, height+1, + "read bad block from wal. got height %d, expected %d", block.Height, height+1) - if block.Height != height+1 { - panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) - } commitHeight := thisBlockCommit.Height - if commitHeight != height+1 { - panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) - } + require.Equal(t, commitHeight, height+1, + "commit doesnt match. 
got height %d, expected %d", commitHeight, height+1) + blocks = append(blocks, block) commits = append(commits, thisBlockCommit) height++ @@ -1096,9 +1150,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { thisBlockParts = types.NewPartSetFromHeader(*p) case *types.Part: _, err := thisBlockParts.AddPart(p) - if err != nil { - return nil, nil, err - } + require.NoError(t, err) case *types.Vote: if p.Type == tmproto.PrecommitType { thisBlockCommit = types.NewCommit(p.Height, p.Round, @@ -1107,29 +1159,22 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } } // grab the last block too - bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) - if err != nil { - panic(err) - } + bz, err := io.ReadAll(thisBlockParts.GetReader()) + require.NoError(t, err) + var pbb = new(tmproto.Block) - err = proto.Unmarshal(bz, pbb) - if err != nil { - panic(err) - } + require.NoError(t, proto.Unmarshal(bz, pbb)) + block, err := types.BlockFromProto(pbb) - if err != nil { - panic(err) - } - if block.Height != height+1 { - panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) - } + require.NoError(t, err) + + require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1) commitHeight := thisBlockCommit.Height - if commitHeight != height+1 { - panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) - } + require.Equal(t, commitHeight, height+1, "commit does not match. 
got height %d, expected %d", commitHeight, height+1) + blocks = append(blocks, block) commits = append(commits, thisBlockCommit) - return blocks, commits, nil + return blocks, commits } func readPieceFromWAL(msg *TimedWALMessage) interface{} { @@ -1153,17 +1198,19 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { // fresh state and mock store func stateAndStore( + t *testing.T, cfg *config.Config, pubKey crypto.PubKey, - appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { + appVersion uint64, +) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) + require.NoError(t, err) state.Version.Consensus.App = appVersion - store := newMockBlockStore(cfg, state.ConsensusParams) - if err := stateStore.Save(state); err != nil { - panic(err) - } + store := newMockBlockStore(t, cfg, state.ConsensusParams) + require.NoError(t, stateStore.Save(state)) + return stateDB, state, store } @@ -1176,11 +1223,16 @@ type mockBlockStore struct { chain []*types.Block commits []*types.Commit base int64 + t *testing.T } // TODO: NewBlockStore(db.NewMemDB) ... 
-func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{cfg, params, nil, nil, 0} +func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore { + return &mockBlockStore{ + cfg: cfg, + params: params, + t: t, + } } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } @@ -1191,10 +1243,13 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] } +func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(bs.t, err) return &types.BlockMeta{ - BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, + BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}, Header: block.Header, } } @@ -1223,38 +1278,40 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { // Test handshake/init chain func TestHandshakeUpdatesValidators(t *testing.T) { - val, _ := factory.RandValidator(true, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + votePower := 10 + int64(rand.Uint32()) + val, _, err := factory.Validator(ctx, votePower) + require.NoError(t, err) vals := types.NewValidatorSet([]*types.Validator{val}) app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := abciclient.NewLocalCreator(app) - cfg := ResetConfig("handshake_test_") + cfg, err := ResetConfig("handshake_test_") + require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) - 
pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(cfg, pubKey, 0x0) + stateDB, state, store := stateAndStore(t, cfg, pubKey, 0x0) stateStore := sm.NewStore(stateDB) oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) - handshaker := NewHandshaker(stateStore, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) - if err := handshaker.Handshake(proxyApp); err != nil { - t.Fatalf("Error on abci handshake: %v", err) - } + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) + + logger := log.TestingLogger() + handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc) + proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") + + require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake") + // reload the state, check the validator set was updated state, err = stateStore.Load() require.NoError(t, err) diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 42900a7d48..2cee22972d 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -3,11 +3,14 @@ package consensus import ( "bytes" "context" + "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "os" "runtime/debug" + "sort" + "sync" "time" "github.com/gogo/protobuf/proto" @@ -15,11 +18,10 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cstypes 
"github.com/tendermint/tendermint/internal/consensus/types" - "github.com/tendermint/tendermint/internal/libs/fail" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/jsontypes" sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" @@ -45,18 +47,49 @@ var msgQueueSize = 1000 // msgs from the reactor which may update the state type msgInfo struct { - Msg Message `json:"msg"` - PeerID types.NodeID `json:"peer_key"` + Msg Message + PeerID types.NodeID + ReceiveTime time.Time +} + +func (msgInfo) TypeTag() string { return "tendermint/wal/MsgInfo" } + +type msgInfoJSON struct { + Msg json.RawMessage `json:"msg"` + PeerID types.NodeID `json:"peer_key"` + ReceiveTime time.Time `json:"receive_time"` +} + +func (m msgInfo) MarshalJSON() ([]byte, error) { + msg, err := jsontypes.Marshal(m.Msg) + if err != nil { + return nil, err + } + return json.Marshal(msgInfoJSON{Msg: msg, PeerID: m.PeerID, ReceiveTime: m.ReceiveTime}) +} + +func (m *msgInfo) UnmarshalJSON(data []byte) error { + var msg msgInfoJSON + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if err := jsontypes.Unmarshal(msg.Msg, &m.Msg); err != nil { + return err + } + m.PeerID = msg.PeerID + return nil } // internally generated messages which may update the state type timeoutInfo struct { - Duration time.Duration `json:"duration"` - Height int64 `json:"height"` + Duration time.Duration `json:"duration,string"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step cstypes.RoundStepType `json:"step"` } +func (timeoutInfo) TypeTag() string { return "tendermint/wal/TimeoutInfo" } + func (ti *timeoutInfo) String() string { 
return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) } @@ -78,6 +111,7 @@ type evidencePool interface { // The internal state machine receives input from peers, the internal validator, and from a timer. type State struct { service.BaseService + logger log.Logger // config details config *config.ConsensusConfig @@ -98,7 +132,7 @@ type State struct { evpool evidencePool // internal state - mtx tmsync.RWMutex + mtx sync.RWMutex cstypes.RoundState state sm.State // State until height-1. // privValidator pubkey, memoized for the duration of one block @@ -117,7 +151,7 @@ type State struct { // we use eventBus to trigger msg broadcasts in the reactor, // and to notify external subscribers, eg. through a websocket - eventBus *types.EventBus + eventBus *eventbus.EventBus // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes @@ -129,9 +163,9 @@ type State struct { nSteps int // some functions can be overwritten for testing - decideProposal func(height int64, round int32) - doPrevote func(height int64, round int32) - setProposal func(proposal *types.Proposal) error + decideProposal func(ctx context.Context, height int64, round int32) + doPrevote func(ctx context.Context, height int64, round int32) + setProposal func(proposal *types.Proposal, t time.Time) error // closed when we finish shutting down done chan struct{} @@ -152,6 +186,8 @@ type StateOption func(*State) // NewState returns a new State. 
func NewState( + ctx context.Context, + logger log.Logger, cfg *config.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, @@ -161,19 +197,20 @@ func NewState( options ...StateOption, ) *State { cs := &State{ + logger: logger, config: cfg, blockExec: blockExec, blockStore: blockStore, txNotifier: txNotifier, peerMsgQueue: make(chan msgInfo, msgQueueSize), internalMsgQueue: make(chan msgInfo, msgQueueSize), - timeoutTicker: NewTimeoutTicker(), + timeoutTicker: NewTimeoutTicker(logger), statsMsgQueue: make(chan msgInfo, msgQueueSize), done: make(chan struct{}), doWALCatchup: true, wal: nilWAL{}, evpool: evpool, - evsw: tmevents.NewEventSwitch(), + evsw: tmevents.NewEventSwitch(logger), metrics: NopMetrics(), onStopCh: make(chan *cstypes.RoundState), } @@ -188,11 +225,11 @@ func NewState( cs.reconstructLastCommit(state) } - cs.updateToState(state) + cs.updateToState(ctx, state) // NOTE: we do not call scheduleRound0 yet, we do that upon Start() - cs.BaseService = *service.NewBaseService(nil, "State", cs) + cs.BaseService = *service.NewBaseService(logger, "State", cs) for _, option := range options { option(cs) } @@ -200,14 +237,8 @@ func NewState( return cs } -// SetLogger implements Service. -func (cs *State) SetLogger(l log.Logger) { - cs.BaseService.Logger = l - cs.timeoutTicker.SetLogger(l) -} - // SetEventBus sets event bus. 
-func (cs *State) SetEventBus(b *types.EventBus) { +func (cs *State) SetEventBus(b *eventbus.EventBus) { cs.eventBus = b cs.blockExec.SetEventBus(b) } @@ -254,14 +285,14 @@ func (cs *State) GetRoundState() *cstypes.RoundState { func (cs *State) GetRoundStateJSON() ([]byte, error) { cs.mtx.RLock() defer cs.mtx.RUnlock() - return tmjson.Marshal(cs.RoundState) + return json.Marshal(cs.RoundState) } // GetRoundStateSimpleJSON returns a json of RoundStateSimple func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { cs.mtx.RLock() defer cs.mtx.RUnlock() - return tmjson.Marshal(cs.RoundState.RoundStateSimple()) + return json.Marshal(cs.RoundState.RoundStateSimple()) } // GetValidators returns a copy of the current validators. @@ -273,7 +304,7 @@ func (cs *State) GetValidators() (int64, []*types.Validator) { // SetPrivValidator sets the private validator account for signing votes. It // immediately requests pubkey and caches it. -func (cs *State) SetPrivValidator(priv types.PrivValidator) { +func (cs *State) SetPrivValidator(ctx context.Context, priv types.PrivValidator) { cs.mtx.Lock() defer cs.mtx.Unlock() @@ -294,13 +325,13 @@ func (cs *State) SetPrivValidator(priv types.PrivValidator) { case *types.ErroringMockPV: cs.privValidatorType = types.ErrorMockSignerClient default: - cs.Logger.Error("unsupported priv validator type", "err", + cs.logger.Error("unsupported priv validator type", "err", fmt.Errorf("error privValidatorType %s", t)) } } - if err := cs.updatePrivValidatorPubKey(); err != nil { - cs.Logger.Error("failed to get private validator pubkey", "err", err) + if err := cs.updatePrivValidatorPubKey(ctx); err != nil { + cs.logger.Error("failed to get private validator pubkey", "err", err) } } @@ -333,11 +364,11 @@ func (cs *State) LoadCommit(height int64) *types.Commit { // OnStart loads the latest state via the WAL, and starts the timeout and // receive routines. 
-func (cs *State) OnStart() error { +func (cs *State) OnStart(ctx context.Context) error { // We may set the WAL in testing before calling Start, so only OpenWAL if its // still the nilWAL. if _, ok := cs.wal.(nilWAL); ok { - if err := cs.loadWalFile(); err != nil { + if err := cs.loadWalFile(ctx); err != nil { return err } } @@ -349,23 +380,24 @@ func (cs *State) OnStart() error { LOOP: for { - err := cs.catchupReplay(cs.Height) + err := cs.catchupReplay(ctx, cs.Height) switch { case err == nil: break LOOP case !IsDataCorruptionError(err): - cs.Logger.Error("error on catchup replay; proceeding to start state anyway", "err", err) + cs.logger.Error("error on catchup replay; proceeding to start state anyway", "err", err) break LOOP case repairAttempted: return err } - cs.Logger.Error("the WAL file is corrupted; attempting repair", "err", err) + cs.logger.Error("the WAL file is corrupted; attempting repair", "err", err) // 1) prep work if err := cs.wal.Stop(); err != nil { + return err } @@ -377,24 +409,24 @@ func (cs *State) OnStart() error { return err } - cs.Logger.Debug("backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile) + cs.logger.Debug("backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile) // 3) try to repair (WAL file will be overwritten!) 
if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil { - cs.Logger.Error("the WAL repair failed", "err", err) + cs.logger.Error("the WAL repair failed", "err", err) return err } - cs.Logger.Info("successful WAL repair") + cs.logger.Info("successful WAL repair") // reload WAL file - if err := cs.loadWalFile(); err != nil { + if err := cs.loadWalFile(ctx); err != nil { return err } } } - if err := cs.evsw.Start(); err != nil { + if err := cs.evsw.Start(ctx); err != nil { return err } @@ -403,7 +435,7 @@ func (cs *State) OnStart() error { // NOTE: we will get a build up of garbage go routines // firing on the tockChan until the receiveRoutine is started // to deal with them (by that point, at most one will be valid) - if err := cs.timeoutTicker.Start(); err != nil { + if err := cs.timeoutTicker.Start(ctx); err != nil { return err } @@ -413,7 +445,7 @@ func (cs *State) OnStart() error { } // now start the receiveRoutine - go cs.receiveRoutine(0) + go cs.receiveRoutine(ctx, 0) // schedule the first round! // use GetRoundState so we don't race the receiveRoutine for access @@ -424,21 +456,23 @@ func (cs *State) OnStart() error { // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan // receiveRoutine: serializes processing of proposoals, block parts, votes; coordinates state transitions -func (cs *State) startRoutines(maxSteps int) { - err := cs.timeoutTicker.Start() +// +// this is only used in tests. +func (cs *State) startRoutines(ctx context.Context, maxSteps int) { + err := cs.timeoutTicker.Start(ctx) if err != nil { - cs.Logger.Error("failed to start timeout ticker", "err", err) + cs.logger.Error("failed to start timeout ticker", "err", err) return } - go cs.receiveRoutine(maxSteps) + go cs.receiveRoutine(ctx, maxSteps) } // loadWalFile loads WAL data from file. It overwrites cs.wal. 
-func (cs *State) loadWalFile() error { - wal, err := cs.OpenWAL(cs.config.WalFile()) +func (cs *State) loadWalFile(ctx context.Context) error { + wal, err := cs.OpenWAL(ctx, cs.config.WalFile()) if err != nil { - cs.Logger.Error("failed to load state WAL", "err", err) + cs.logger.Error("failed to load state WAL", "err", err) return err } @@ -448,24 +482,31 @@ func (cs *State) loadWalFile() error { // OnStop implements service.Service. func (cs *State) OnStop() { - // If the node is committing a new block, wait until it is finished! if cs.GetRoundState().Step == cstypes.RoundStepCommit { select { case <-cs.onStopCh: case <-time.After(cs.config.TimeoutCommit): - cs.Logger.Error("OnStop: timeout waiting for commit to finish", "time", cs.config.TimeoutCommit) + cs.logger.Error("OnStop: timeout waiting for commit to finish", "time", cs.config.TimeoutCommit) } } close(cs.onStopCh) - if err := cs.evsw.Stop(); err != nil { - cs.Logger.Error("failed trying to stop eventSwitch", "error", err) + if cs.evsw.IsRunning() { + if err := cs.evsw.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.logger.Error("failed trying to stop eventSwitch", "error", err) + } + } } - if err := cs.timeoutTicker.Stop(); err != nil { - cs.Logger.Error("failed trying to stop timeoutTicket", "error", err) + if cs.timeoutTicker.IsRunning() { + if err := cs.timeoutTicker.Stop(); err != nil { + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.logger.Error("failed trying to stop timeoutTicket", "error", err) + } + } } // WAL is stopped in receiveRoutine. } @@ -479,17 +520,15 @@ func (cs *State) Wait() { // OpenWAL opens a file to log all consensus messages and timeouts for // deterministic accountability. 
-func (cs *State) OpenWAL(walFile string) (WAL, error) { - wal, err := NewWAL(walFile) +func (cs *State) OpenWAL(ctx context.Context, walFile string) (WAL, error) { + wal, err := NewWAL(ctx, cs.logger.With("wal", walFile), walFile) if err != nil { - cs.Logger.Error("failed to open WAL", "file", walFile, "err", err) + cs.logger.Error("failed to open WAL", "file", walFile, "err", err) return nil, err } - wal.SetLogger(cs.Logger.With("wal", walFile)) - - if err := wal.Start(); err != nil { - cs.Logger.Error("failed to start WAL", "err", err) + if err := wal.Start(ctx); err != nil { + cs.logger.Error("failed to start WAL", "err", err) return nil, err } @@ -504,58 +543,85 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) { // TODO: should these return anything or let callers just use events? // AddVote inputs a vote. -func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { +func (cs *State) AddVote(ctx context.Context, vote *types.Vote, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, "", tmtime.Now()}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID, tmtime.Now()}: + return nil + } } // TODO: wait for event?! - return false, nil } // SetProposal inputs a proposal. 
-func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error { +func (cs *State) SetProposal(ctx context.Context, proposal *types.Proposal, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID, tmtime.Now()}: + return nil + } } // TODO: wait for event?! - return nil } // AddProposalBlockPart inputs a part of the proposal block. -func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error { - +func (cs *State) AddProposalBlockPart(ctx context.Context, height int64, round int32, part *types.Part, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, "", tmtime.Now()}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID, tmtime.Now()}: + return nil + } } // TODO: wait for event?! - return nil } // SetProposalAndBlock inputs the proposal and all block parts. 
func (cs *State) SetProposalAndBlock( + ctx context.Context, proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID types.NodeID, ) error { - if err := cs.SetProposal(proposal, peerID); err != nil { + if err := cs.SetProposal(ctx, proposal, peerID); err != nil { return err } for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) - if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil { + if err := cs.AddProposalBlockPart(ctx, proposal.Height, proposal.Round, part, peerID); err != nil { return err } } @@ -578,7 +644,7 @@ func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) { // enterNewRound(height, 0) at cs.StartTime. func (cs *State) scheduleRound0(rs *cstypes.RoundState) { - // cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) + // cs.logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) sleepDuration := rs.StartTime.Sub(tmtime.Now()) cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } @@ -589,16 +655,22 @@ func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int } // send a msg into the receiveRoutine regarding our own proposal, block part, or vote -func (cs *State) sendInternalMessage(mi msgInfo) { +func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { select { + case <-ctx.Done(): case cs.internalMsgQueue <- mi: default: // NOTE: using the go-routine means our votes can // be processed out of order. 
// TODO: use CList here for strict determinism and // attempt push to internalMsgQueue in receiveRoutine - cs.Logger.Debug("internal msg queue is full; using a go-routine") - go func() { cs.internalMsgQueue <- mi }() + cs.logger.Debug("internal msg queue is full; using a go-routine") + go func() { + select { + case <-ctx.Done(): + case cs.internalMsgQueue <- mi: + } + }() } } @@ -627,7 +699,7 @@ func (cs *State) reconstructLastCommit(state sm.State) { // Updates State and increments height to match that of state. // The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. -func (cs *State) updateToState(state sm.State) { +func (cs *State) updateToState(ctx context.Context, state sm.State) { if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { panic(fmt.Sprintf( "updateToState() expected state height of %v but found %v", @@ -657,12 +729,12 @@ func (cs *State) updateToState(state sm.State) { // signal the new round step, because other services (eg. txNotifier) // depend on having an up-to-date peer state! 
if state.LastBlockHeight <= cs.state.LastBlockHeight { - cs.Logger.Debug( + cs.logger.Debug( "ignoring updateToState()", "new_height", state.LastBlockHeight+1, "old_height", cs.state.LastBlockHeight+1, ) - cs.newStep() + cs.newStep(ctx) return } } @@ -715,6 +787,7 @@ func (cs *State) updateToState(state sm.State) { cs.Validators = validators cs.Proposal = nil + cs.ProposalReceiveTime = time.Time{} cs.ProposalBlock = nil cs.ProposalBlockParts = nil cs.LockedRound = -1 @@ -731,24 +804,24 @@ func (cs *State) updateToState(state sm.State) { cs.state = state // Finally, broadcast RoundState - cs.newStep() + cs.newStep(ctx) } -func (cs *State) newStep() { +func (cs *State) newStep(ctx context.Context) { rs := cs.RoundStateEvent() if err := cs.wal.Write(rs); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.logger.Error("failed writing to WAL", "err", err) } cs.nSteps++ // newStep is called by updateToState in NewState before the eventBus is set! if cs.eventBus != nil { - if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { - cs.Logger.Error("failed publishing new round step", "err", err) + if err := cs.eventBus.PublishEventNewRoundStep(ctx, rs); err != nil { + cs.logger.Error("failed publishing new round step", "err", err) } - cs.evsw.FireEvent(types.EventNewRoundStepValue, &cs.RoundState) + cs.evsw.FireEvent(ctx, types.EventNewRoundStepValue, &cs.RoundState) } } @@ -760,7 +833,7 @@ func (cs *State) newStep() { // It keeps the RoundState and is the only thing that updates it. // Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. // State must be locked before any internal state is updated. 
-func (cs *State) receiveRoutine(maxSteps int) { +func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) { onExit := func(cs *State) { // NOTE: the internalMsgQueue may have signed messages from our // priv_val that haven't hit the WAL, but its ok because @@ -768,7 +841,9 @@ func (cs *State) receiveRoutine(maxSteps int) { // close wal now that we're done writing to it if err := cs.wal.Stop(); err != nil { - cs.Logger.Error("failed trying to stop WAL", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + cs.logger.Error("failed trying to stop WAL", "error", err) + } } cs.wal.Wait() @@ -777,7 +852,7 @@ func (cs *State) receiveRoutine(maxSteps int) { defer func() { if r := recover(); r != nil { - cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack())) + cs.logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack())) // stop gracefully // // NOTE: We most probably shouldn't be running any further when there is @@ -793,7 +868,7 @@ func (cs *State) receiveRoutine(maxSteps int) { for { if maxSteps > 0 { if cs.nSteps >= maxSteps { - cs.Logger.Debug("reached max steps; exiting receive routine") + cs.logger.Debug("reached max steps; exiting receive routine") cs.nSteps = 0 return } @@ -804,16 +879,16 @@ func (cs *State) receiveRoutine(maxSteps int) { select { case <-cs.txNotifier.TxsAvailable(): - cs.handleTxsAvailable() + cs.handleTxsAvailable(ctx) case mi = <-cs.peerMsgQueue: if err := cs.wal.Write(mi); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.logger.Error("failed writing to WAL", "err", err) } // handles proposals, block parts, votes // may generate internal events (votes, complete proposals, 2/3 majorities) - cs.handleMsg(mi) + cs.handleMsg(ctx, mi) case mi = <-cs.internalMsgQueue: err := cs.wal.WriteSync(mi) // NOTE: fsync @@ -824,35 +899,29 @@ func (cs *State) receiveRoutine(maxSteps int) { )) } - if _, ok := mi.Msg.(*VoteMessage); ok { - // we actually want to simulate 
failing during - // the previous WriteSync, but this isn't easy to do. - // Equivalent would be to fail here and manually remove - // some bytes from the end of the wal. - fail.Fail() // XXX - } - // handles proposals, block parts, votes - cs.handleMsg(mi) + cs.handleMsg(ctx, mi) case ti := <-cs.timeoutTicker.Chan(): // tockChan: if err := cs.wal.Write(ti); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.logger.Error("failed writing to WAL", "err", err) } // if the timeout is relevant to the rs // go to the next step - cs.handleTimeout(ti, rs) + cs.handleTimeout(ctx, ti, rs) - case <-cs.Quit(): + case <-ctx.Done(): onExit(cs) return + } + // TODO should we handle context cancels here? } } // state transitions on complete-proposal, 2/3-any, 2/3-one -func (cs *State) handleMsg(mi msgInfo) { +func (cs *State) handleMsg(ctx context.Context, mi msgInfo) { cs.mtx.Lock() defer cs.mtx.Unlock() @@ -867,17 +936,21 @@ func (cs *State) handleMsg(mi msgInfo) { case *ProposalMessage: // will not cause transition. 
// once proposal is set, we can receive block parts - err = cs.setProposal(msg.Proposal) + err = cs.setProposal(msg.Proposal, mi.ReceiveTime) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - added, err = cs.addProposalBlockPart(msg, peerID) + added, err = cs.addProposalBlockPart(ctx, msg, peerID) if added { - cs.statsMsgQueue <- mi + select { + case cs.statsMsgQueue <- mi: + case <-ctx.Done(): + return + } } if err != nil && msg.Round != cs.Round { - cs.Logger.Debug( + cs.logger.Debug( "received block part from wrong round", "height", cs.Height, "cs_round", cs.Round, @@ -889,9 +962,13 @@ func (cs *State) handleMsg(mi msgInfo) { case *VoteMessage: // attempt to add the vote and dupeout the validator if its a duplicate signature // if the vote gives us a 2/3-any or 2/3-one, we transition - added, err = cs.tryAddVote(msg.Vote, peerID) + added, err = cs.tryAddVote(ctx, msg.Vote, peerID) if added { - cs.statsMsgQueue <- mi + select { + case cs.statsMsgQueue <- mi: + case <-ctx.Done(): + return + } } // if err == ErrAddingVote { @@ -910,12 +987,12 @@ func (cs *State) handleMsg(mi msgInfo) { // We could make note of this and help filter in broadcastHasVoteMessage(). 
default: - cs.Logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) + cs.logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) return } if err != nil { - cs.Logger.Error( + cs.logger.Error( "failed to process message", "height", cs.Height, "round", cs.Round, @@ -926,12 +1003,16 @@ func (cs *State) handleMsg(mi msgInfo) { } } -func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { - cs.Logger.Debug("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) +func (cs *State) handleTimeout( + ctx context.Context, + ti timeoutInfo, + rs cstypes.RoundState, +) { + cs.logger.Debug("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // timeouts must be for current height, round, step if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) { - cs.Logger.Debug("ignoring tock because we are ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step) + cs.logger.Debug("ignoring tock because we are ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step) return } @@ -943,32 +1024,32 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { case cstypes.RoundStepNewHeight: // NewRound event fired from enterNewRound. // XXX: should we fire timeout here (for timeout commit)? 
- cs.enterNewRound(ti.Height, 0) + cs.enterNewRound(ctx, ti.Height, 0) case cstypes.RoundStepNewRound: - cs.enterPropose(ti.Height, 0) + cs.enterPropose(ctx, ti.Height, 0) case cstypes.RoundStepPropose: - if err := cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout propose", "err", err) + if err := cs.eventBus.PublishEventTimeoutPropose(ctx, cs.RoundStateEvent()); err != nil { + cs.logger.Error("failed publishing timeout propose", "err", err) } - cs.enterPrevote(ti.Height, ti.Round) + cs.enterPrevote(ctx, ti.Height, ti.Round) case cstypes.RoundStepPrevoteWait: - if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout wait", "err", err) + if err := cs.eventBus.PublishEventTimeoutWait(ctx, cs.RoundStateEvent()); err != nil { + cs.logger.Error("failed publishing timeout wait", "err", err) } - cs.enterPrecommit(ti.Height, ti.Round) + cs.enterPrecommit(ctx, ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: - if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout wait", "err", err) + if err := cs.eventBus.PublishEventTimeoutWait(ctx, cs.RoundStateEvent()); err != nil { + cs.logger.Error("failed publishing timeout wait", "err", err) } - cs.enterPrecommit(ti.Height, ti.Round) - cs.enterNewRound(ti.Height, ti.Round+1) + cs.enterPrecommit(ctx, ti.Height, ti.Round) + cs.enterNewRound(ctx, ti.Height, ti.Round+1) default: panic(fmt.Sprintf("invalid timeout step: %v", ti.Step)) @@ -976,7 +1057,7 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { } -func (cs *State) handleTxsAvailable() { +func (cs *State) handleTxsAvailable(ctx context.Context) { cs.mtx.Lock() defer cs.mtx.Unlock() @@ -997,7 +1078,7 @@ func (cs *State) handleTxsAvailable() { cs.scheduleTimeout(timeoutCommit, cs.Height, 0, cstypes.RoundStepNewRound) case 
cstypes.RoundStepNewRound: // after timeoutCommit - cs.enterPropose(cs.Height, 0) + cs.enterPropose(ctx, cs.Height, 0) } } @@ -1006,13 +1087,13 @@ func (cs *State) handleTxsAvailable() { // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) // NOTE: cs.StartTime was already set for height. -func (cs *State) enterNewRound(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { logger.Debug( @@ -1047,6 +1128,7 @@ func (cs *State) enterNewRound(height int64, round int32) { } else { logger.Debug("resetting proposal info") cs.Proposal = nil + cs.ProposalReceiveTime = time.Time{} cs.ProposalBlock = nil cs.ProposalBlockParts = nil } @@ -1054,8 +1136,8 @@ func (cs *State) enterNewRound(height int64, round int32) { cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false - if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil { - cs.Logger.Error("failed publishing new round", "err", err) + if err := cs.eventBus.PublishEventNewRound(ctx, cs.NewRoundEvent()); err != nil { + cs.logger.Error("failed publishing new round", "err", err) } cs.metrics.Rounds.Set(float64(round)) @@ -1069,9 +1151,10 @@ func (cs *State) 
enterNewRound(height int64, round int32) { cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound) } - } else { - cs.enterPropose(height, round) + return } + + cs.enterPropose(ctx, height, round) } // needProofBlock returns true on the first height (so the genesis app hash is signed right away) @@ -1091,10 +1174,10 @@ func (cs *State) needProofBlock(height int64) bool { // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): -// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool -func (cs *State) enterPropose(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterPropose(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { logger.Debug( @@ -1104,18 +1187,28 @@ func (cs *State) enterPropose(height int64, round int32) { return } + // If this validator is the proposer of this round, and the previous block time is later than + // our local clock time, wait to propose until our local clock time has passed the block time. 
+ if cs.privValidatorPubKey != nil && cs.isProposer(cs.privValidatorPubKey.Address()) { + proposerWaitTime := proposerWaitTime(tmtime.DefaultSource{}, cs.state.LastBlockTime) + if proposerWaitTime > 0 { + cs.scheduleTimeout(proposerWaitTime, height, round, cstypes.RoundStepNewRound) + return + } + } + logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPropose: cs.updateRoundStep(round, cstypes.RoundStepPropose) - cs.newStep() + cs.newStep(ctx) // If we have the whole proposal + POL, then goto Prevote now. // else, we'll enterPrevote when the rest of the proposal is received (in AddProposalBlockPart), // or else after timeoutPropose if cs.isProposalComplete() { - cs.enterPrevote(height, cs.Round) + cs.enterPrevote(ctx, height, cs.Round) } }() @@ -1124,12 +1217,10 @@ func (cs *State) enterPropose(height int64, round int32) { // Nothing more to do if we're not a validator if cs.privValidator == nil { - logger.Debug("node is not a validator") + logger.Debug("propose step; not proposing since node is not a validator") return } - logger.Debug("node is a validator") - if cs.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. 
@@ -1137,21 +1228,23 @@ func (cs *State) enterPropose(height int64, round int32) { return } - address := cs.privValidatorPubKey.Address() + addr := cs.privValidatorPubKey.Address() // if not a validator, we're done - if !cs.Validators.HasAddress(address) { - logger.Debug("node is not a validator", "addr", address, "vals", cs.Validators) + if !cs.Validators.HasAddress(addr) { + logger.Debug("propose step; not proposing since node is not in the validator set", + "addr", addr, + "vals", cs.Validators) return } - if cs.isProposer(address) { + if cs.isProposer(addr) { logger.Debug( "propose step; our turn to propose", - "proposer", address, + "proposer", addr, ) - cs.decideProposal(height, round) + cs.decideProposal(ctx, height, round) } else { logger.Debug( "propose step; not our turn to propose", @@ -1164,7 +1257,7 @@ func (cs *State) isProposer(address []byte) bool { return bytes.Equal(cs.Validators.GetProposer().Address, address) } -func (cs *State) defaultDecideProposal(height int64, round int32) { +func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round int32) { var block *types.Block var blockParts *types.PartSet @@ -1174,8 +1267,9 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { block, blockParts = cs.ValidBlock, cs.ValidBlockParts } else { // Create a new proposal block from state/txs from the mempool. - block, blockParts = cs.createProposalBlock() - if block == nil { + var err error + block, blockParts, err = cs.createProposalBlock() + if block == nil || err != nil { return } } @@ -1183,31 +1277,31 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, // and the privValidator will refuse to sign anything. 
if err := cs.wal.FlushAndSync(); err != nil { - cs.Logger.Error("failed flushing WAL to disk") + cs.logger.Error("failed flushing WAL to disk") } // Make proposal propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) + proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID, block.Header.Time) p := proposal.ToProto() // wait the max amount we would wait for a proposal - ctx, cancel := context.WithTimeout(context.TODO(), cs.config.TimeoutPropose) + ctxto, cancel := context.WithTimeout(ctx, cs.config.TimeoutPropose) defer cancel() - if err := cs.privValidator.SignProposal(ctx, cs.state.ChainID, p); err == nil { + if err := cs.privValidator.SignProposal(ctxto, cs.state.ChainID, p); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue - cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + cs.sendInternalMessage(ctx, msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}) for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) + cs.sendInternalMessage(ctx, msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, "", tmtime.Now()}) } - cs.Logger.Debug("signed proposal", "height", height, "round", round, "proposal", proposal) + cs.logger.Debug("signed proposal", "height", height, "round", round, "proposal", proposal) } else if !cs.replayMode { - cs.Logger.Error("propose step; failed signing proposal", "height", height, "round", round, "err", err) + cs.logger.Error("propose step; failed signing proposal", "height", height, "round", round, "err", err) } } @@ -1234,12 +1328,13 @@ func (cs *State) isProposalComplete() bool { // // NOTE: keep it side-effect free for clarity. // CONTRACT: cs.privValidator is not nil. 
-func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { +func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet, err error) { if cs.privValidator == nil { - panic("entered createProposalBlock with privValidator being nil") + return nil, nil, errors.New("entered createProposalBlock with privValidator being nil") } var commit *types.Commit + var votes []*types.Vote switch { case cs.Height == cs.state.InitialHeight: // We're creating a proposal for the first block. @@ -1249,30 +1344,34 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() + votes = cs.LastCommit.GetVotes() default: // This shouldn't happen. - cs.Logger.Error("propose step; cannot propose anything without commit for the previous block") + cs.logger.Error("propose step; cannot propose anything without commit for the previous block") return } if cs.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - cs.Logger.Error("propose step; empty priv validator public key", "err", errPubKeyIsNotSet) + cs.logger.Error("propose step; empty priv validator public key", "err", errPubKeyIsNotSet) return } proposerAddr := cs.privValidatorPubKey.Address() - return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr) + return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr, votes) } // Enter: `timeoutPropose` after entering Propose. // Enter: proposal block and POL is ready. -// Prevote for LockedBlock if we're locked, or ProposalBlock if valid. -// Otherwise vote nil. 
-func (cs *State) enterPrevote(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +// If we received a valid proposal within this round and we are not locked on a block, +// we will prevote for block. +// Otherwise, if we receive a valid proposal that matches the block we are +// locked on or matches a block that received a POL in a round later than our +// locked round, prevote for the proposal, otherwise vote nil. +func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { logger.Debug( @@ -1285,32 +1384,60 @@ func (cs *State) enterPrevote(height int64, round int32) { defer func() { // Done enterPrevote: cs.updateRoundStep(round, cstypes.RoundStepPrevote) - cs.newStep() + cs.newStep(ctx) }() logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) // Sign and broadcast vote as necessary - cs.doPrevote(height, round) + cs.doPrevote(ctx, height, round) // Once `addVote` hits any +2/3 prevotes, we will go to PrevoteWait // (so we have more time to try and collect +2/3 prevotes for a single block) } -func (cs *State) defaultDoPrevote(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) proposalIsTimely() bool { + sp := types.SynchronyParams{ + Precision: cs.state.ConsensusParams.Synchrony.Precision, + MessageDelay: cs.state.ConsensusParams.Synchrony.MessageDelay, + } + + return cs.Proposal.IsTimely(cs.ProposalReceiveTime, sp, cs.Round) +} + +func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) - // If a block is locked, prevote that. 
- if cs.LockedBlock != nil { - logger.Debug("prevote step; already locked on a block; prevoting locked block") - cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + // Check that a proposed block was not received within this round (and thus executing this from a timeout). + if cs.ProposalBlock == nil { + logger.Debug("prevote step: ProposalBlock is nil; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } - // If ProposalBlock is nil, prevote nil. - if cs.ProposalBlock == nil { - logger.Debug("prevote step: ProposalBlock is nil") - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + if cs.Proposal == nil { + logger.Debug("prevote step: did not receive proposal; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) { + logger.Debug("prevote step: proposal timestamp not equal; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { + logger.Debug("prevote step: Proposal is not timely; prevoting nil", + "proposed", + tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), + "received", + tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano), + "msg_delay", + cs.state.ConsensusParams.Synchrony.MessageDelay, + "precision", + cs.state.ConsensusParams.Synchrony.Precision) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1318,21 +1445,77 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) if err != nil { // ProposalBlock is invalid, prevote nil. 
- logger.Error("prevote step: ProposalBlock is invalid", "err", err) - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + logger.Error("prevote step: ProposalBlock is invalid; prevoting nil", "err", err) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } - // Prevote cs.ProposalBlock - // NOTE: the proposal signature is validated when it is received, - // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) - logger.Debug("prevote step: ProposalBlock is valid") - cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + /* + 22: upon from proposer(h_p, round_p) while step_p = propose do + 23: if valid(v) && (lockedRound_p = −1 || lockedValue_p = v) then + 24: broadcast + + Here, cs.Proposal.POLRound corresponds to the -1 in the above algorithm rule. + This means that the proposer is producing a new proposal that has not previously + seen a 2/3 majority by the network. + + If we have already locked on a different value that is different from the proposed value, + we prevote nil since we are locked on a different value. Otherwise, if we're not locked on a block + or the proposal matches our locked block, we prevote the proposal. 
+ */ + if cs.Proposal.POLRound == -1 { + if cs.LockedRound == -1 { + logger.Debug("prevote step: ProposalBlock is valid and there is no locked block; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) { + logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + } + + /* + 28: upon from proposer(h_p, round_p) AND 2f + 1 while + step_p = propose && (v_r ≥ 0 && v_r < round_p) do + 29: if valid(v) && (lockedRound_p ≤ v_r || lockedValue_p = v) then + 30: broadcast + + This rule is a bit confusing but breaks down as follows: + + If we see a proposal in the current round for value 'v' that lists its valid round as 'v_r' + AND this validator saw a 2/3 majority of the voting power prevote 'v' in round 'v_r', then we will + issue a prevote for 'v' in this round if 'v' is valid and either matches our locked value OR + 'v_r' is a round greater than or equal to our current locked round. + + 'v_r' can be a round greater than to our current locked round if a 2/3 majority of + the network prevoted a value in round 'v_r' but we did not lock on it, possibly because we + missed the proposal in round 'v_r'. 
+ */ + blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority() + if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round { + if cs.LockedRound <= cs.Proposal.POLRound { + logger.Debug("prevote step: ProposalBlock is valid and received a 2/3" + + "majority in a round later than the locked round; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) { + logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + } + + logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or " + + "did not receive a more recent majority; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) } // Enter: any +2/3 prevotes at next round. -func (cs *State) enterPrevoteWait(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterPrevoteWait(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { logger.Debug( @@ -1354,7 +1537,7 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { defer func() { // Done enterPrevoteWait: cs.updateRoundStep(round, cstypes.RoundStepPrevoteWait) - cs.newStep() + cs.newStep(ctx) }() // Wait for some more prevotes; enterPrecommit @@ -1365,10 +1548,9 @@ func (cs *State) enterPrevoteWait(height int64, round int32) { // Enter: `timeoutPrecommit` after any +2/3 precommits. // Enter: +2/3 precomits for block or nil. 
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) -// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil otherwise. -func (cs *State) enterPrecommit(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { logger.Debug( @@ -1383,7 +1565,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { defer func() { // Done enterPrecommit: cs.updateRoundStep(round, cstypes.RoundStepPrecommit) - cs.newStep() + cs.newStep(ctx) }() // check for a polka @@ -1397,12 +1579,12 @@ func (cs *State) enterPrecommit(height int64, round int32) { logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil") } - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) return } // At this point +2/3 prevoted for a particular block or nil. - if err := cs.eventBus.PublishEventPolka(cs.RoundStateEvent()); err != nil { + if err := cs.eventBus.PublishEventPolka(ctx, cs.RoundStateEvent()); err != nil { logger.Error("failed publishing polka", "err", err) } @@ -1412,85 +1594,79 @@ func (cs *State) enterPrecommit(height int64, round int32) { panic(fmt.Sprintf("this POLRound should be %v but got %v", round, polRound)) } - // +2/3 prevoted nil. Unlock and precommit nil. 
- if len(blockID.Hash) == 0 { - if cs.LockedBlock == nil { - logger.Debug("precommit step; +2/3 prevoted for nil") - } else { - logger.Debug("precommit step; +2/3 prevoted for nil; unlocking") - cs.LockedRound = -1 - cs.LockedBlock = nil - cs.LockedBlockParts = nil - - if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event unlock", "err", err) - } - } + // +2/3 prevoted nil. Precommit nil. + if blockID.IsNil() { + logger.Debug("precommit step: +2/3 prevoted for nil; precommitting nil") + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) + return + } + // At this point, +2/3 prevoted for a particular block. - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + // If we never received a proposal for this block, we must precommit nil + if cs.Proposal == nil || cs.ProposalBlock == nil { + logger.Debug("precommit step; did not receive proposal, precommitting nil") + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) return } - // At this point, +2/3 prevoted for a particular block. + // If the proposal time does not match the block time, precommit nil. 
+ if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) { + logger.Debug("precommit step: proposal timestamp not equal; precommitting nil") + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) + return + } // If we're already locked on that block, precommit it, and update the LockedRound if cs.LockedBlock.HashesTo(blockID.Hash) { - logger.Debug("precommit step; +2/3 prevoted locked block; relocking") + logger.Debug("precommit step: +2/3 prevoted locked block; relocking") cs.LockedRound = round - if err := cs.eventBus.PublishEventRelock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event relock", "err", err) + if err := cs.eventBus.PublishEventRelock(ctx, cs.RoundStateEvent()); err != nil { + logger.Error("precommit step: failed publishing event relock", "err", err) } - cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + cs.signAddVote(ctx, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) return } - // If +2/3 prevoted for proposal block, stage and precommit it + // If greater than 2/3 of the voting power on the network prevoted for + // the proposed block, update our locked block to this block and issue a + // precommit vote for it. if cs.ProposalBlock.HashesTo(blockID.Hash) { - logger.Debug("precommit step; +2/3 prevoted proposal block; locking", "hash", blockID.Hash) + logger.Debug("precommit step: +2/3 prevoted proposal block; locking", "hash", blockID.Hash) // Validate the block. 
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - panic(fmt.Sprintf("precommit step; +2/3 prevoted for an invalid block: %v", err)) + panic(fmt.Sprintf("precommit step: +2/3 prevoted for an invalid block %v; relocking", err)) } cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock cs.LockedBlockParts = cs.ProposalBlockParts - if err := cs.eventBus.PublishEventLock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event lock", "err", err) + if err := cs.eventBus.PublishEventLock(ctx, cs.RoundStateEvent()); err != nil { + logger.Error("precommit step: failed publishing event lock", "err", err) } - cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + cs.signAddVote(ctx, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) return } // There was a polka in this round for a block we don't have. - // Fetch that block, unlock, and precommit nil. - // The +2/3 prevotes for this round is the POL for our unlock. - logger.Debug("precommit step; +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID) - - cs.LockedRound = -1 - cs.LockedBlock = nil - cs.LockedBlockParts = nil + // Fetch that block, and precommit nil. + logger.Debug("precommit step: +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID) if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { cs.ProposalBlock = nil cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } - if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event unlock", "err", err) - } - - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. 
-func (cs *State) enterPrecommitWait(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterPrecommitWait(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { logger.Debug( @@ -1513,7 +1689,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { defer func() { // Done enterPrecommitWait: cs.TriggeredTimeoutPrecommit = true - cs.newStep() + cs.newStep(ctx) }() // wait for some more precommits; enterNewRound @@ -1521,8 +1697,8 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { } // Enter: +2/3 precommits for block -func (cs *State) enterCommit(height int64, commitRound int32) { - logger := cs.Logger.With("height", height, "commit_round", commitRound) +func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int32) { + logger := cs.logger.With("height", height, "commit_round", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { logger.Debug( @@ -1540,10 +1716,10 @@ func (cs *State) enterCommit(height int64, commitRound int32) { cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit) cs.CommitRound = commitRound cs.CommitTime = tmtime.Now() - cs.newStep() + cs.newStep(ctx) // Maybe finalize immediately. 
- cs.tryFinalizeCommit(height) + cs.tryFinalizeCommit(ctx, height) }() blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() @@ -1574,25 +1750,25 @@ func (cs *State) enterCommit(height int64, commitRound int32) { cs.ProposalBlock = nil cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) - if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { + if err := cs.eventBus.PublishEventValidBlock(ctx, cs.RoundStateEvent()); err != nil { logger.Error("failed publishing valid block", "err", err) } - cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState) + cs.evsw.FireEvent(ctx, types.EventValidBlockValue, &cs.RoundState) } } } // If we have the block AND +2/3 commits for it, finalize. -func (cs *State) tryFinalizeCommit(height int64) { - logger := cs.Logger.With("height", height) +func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) { + logger := cs.logger.With("height", height) if cs.Height != height { panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() - if !ok || len(blockID.Hash) == 0 { + if !ok || blockID.IsNil() { logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil") return } @@ -1608,12 +1784,12 @@ func (cs *State) tryFinalizeCommit(height int64) { return } - cs.finalizeCommit(height) + cs.finalizeCommit(ctx, height) } // Increment height and goto cstypes.RoundStepNewHeight -func (cs *State) finalizeCommit(height int64) { - logger := cs.Logger.With("height", height) +func (cs *State) finalizeCommit(ctx context.Context, height int64) { + logger := cs.logger.With("height", height) if cs.Height != height || cs.Step != cstypes.RoundStepCommit { logger.Debug( @@ -1623,6 +1799,8 @@ func (cs *State) finalizeCommit(height int64) { return } + cs.calculatePrevoteMessageDelayMetrics() + blockID, ok := 
cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts @@ -1648,8 +1826,6 @@ func (cs *State) finalizeCommit(height int64) { ) logger.Debug(fmt.Sprintf("%v", block)) - fail.Fail() // XXX - // Save to blockStore. if cs.blockStore.Height() < block.Height { // NOTE: the seenCommit is local justification to commit this block, @@ -1662,8 +1838,6 @@ func (cs *State) finalizeCommit(height int64) { logger.Debug("calling finalizeCommit on already stored block", "height", block.Height) } - fail.Fail() // XXX - // Write EndHeightMessage{} for this height, implying that the blockstore // has saved the block. // @@ -1685,14 +1859,12 @@ func (cs *State) finalizeCommit(height int64) { )) } - fail.Fail() // XXX - // Create a copy of the state for staging and an event cache for txs. stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. // NOTE The block.AppHash wont reflect these txs until the next block. - stateCopy, err := cs.blockExec.ApplyBlock( + stateCopy, err := cs.blockExec.ApplyBlock(ctx, stateCopy, types.BlockID{ Hash: block.Hash(), @@ -1705,18 +1877,14 @@ func (cs *State) finalizeCommit(height int64) { return } - fail.Fail() // XXX - // must be called before we update state cs.RecordMetrics(height, block) // NewHeightStep! - cs.updateToState(stateCopy) - - fail.Fail() // XXX + cs.updateToState(ctx, stateCopy) // Private validator might have changed it's key pair => refetch pubkey. 
- if err := cs.updatePrivValidatorPubKey(); err != nil { + if err := cs.updatePrivValidatorPubKey(ctx); err != nil { logger.Error("failed to get private validator pubkey", "err", err) } @@ -1750,7 +1918,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { address types.Address ) if commitSize != valSetLen { - cs.Logger.Error(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", + cs.logger.Error(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators)) return } @@ -1758,7 +1926,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { if cs.privValidator != nil { if cs.privValidatorPubKey == nil { // Metrics won't be updated, but it's not critical. - cs.Logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet)) + cs.logger.Error("recordMetrics", "err", errPubKeyIsNotSet) } else { address = cs.privValidatorPubKey.Address() } @@ -1822,10 +1990,10 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { //----------------------------------------------------------------------------- -func (cs *State) defaultSetProposal(proposal *types.Proposal) error { +func (cs *State) defaultSetProposal(proposal *types.Proposal, recvTime time.Time) error { // Already have one // TODO: possibly catch double proposals - if cs.Proposal != nil { + if cs.Proposal != nil || proposal == nil { return nil } @@ -1850,6 +2018,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { proposal.Signature = p.Signature cs.Proposal = proposal + cs.ProposalReceiveTime = recvTime + cs.calculateProposalTimestampDifferenceMetric() // We don't update cs.ProposalBlockParts if it is already set. // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. 
// TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! @@ -1857,19 +2027,23 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - cs.Logger.Info("received proposal", "proposal", proposal) + cs.logger.Info("received proposal", "proposal", proposal) return nil } // NOTE: block is not necessarily valid. // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, // once we have the full block. -func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) { +func (cs *State) addProposalBlockPart( + ctx context.Context, + msg *BlockPartMessage, + peerID types.NodeID, +) (added bool, err error) { height, round, part := msg.Height, msg.Round, msg.Part // Blocks might be reused, so round mismatch is OK if cs.Height != height { - cs.Logger.Debug("received block part from wrong height", "height", height, "round", round) + cs.logger.Debug("received block part from wrong height", "height", height, "round", round) return false, nil } @@ -1877,7 +2051,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID if cs.ProposalBlockParts == nil { // NOTE: this can happen when we've gone to a higher round and // then receive parts from the previous round - not necessarily a bad peer. 
- cs.Logger.Debug( + cs.logger.Debug( "received a block part when we are not expecting any", "height", height, "round", round, @@ -1897,7 +2071,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID ) } if added && cs.ProposalBlockParts.IsComplete() { - bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) + bz, err := io.ReadAll(cs.ProposalBlockParts.GetReader()) if err != nil { return added, err } @@ -1916,18 +2090,18 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID cs.ProposalBlock = block // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal - cs.Logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) + cs.logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) - if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil { - cs.Logger.Error("failed publishing event complete proposal", "err", err) + if err := cs.eventBus.PublishEventCompleteProposal(ctx, cs.CompleteProposalEvent()); err != nil { + cs.logger.Error("failed publishing event complete proposal", "err", err) } // Update Valid* if we can. 
prevotes := cs.Votes.Prevotes(cs.Round) blockID, hasTwoThirds := prevotes.TwoThirdsMajority() - if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) { + if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) { if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.Logger.Debug( + cs.logger.Debug( "updating valid block to new proposal block", "valid_round", cs.Round, "valid_block_hash", cs.ProposalBlock.Hash(), @@ -1946,13 +2120,13 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { // Move onto the next step - cs.enterPrevote(height, cs.Round) + cs.enterPrevote(ctx, height, cs.Round) if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added - cs.enterPrecommit(height, cs.Round) + cs.enterPrecommit(ctx, height, cs.Round) } } else if cs.Step == cstypes.RoundStepCommit { // If we're waiting on the proposal block... - cs.tryFinalizeCommit(height) + cs.tryFinalizeCommit(ctx, height) } return added, nil @@ -1962,8 +2136,8 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID } // Attempt to add the vote. if its a duplicate signature, dupeout the validator -func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) { - added, err := cs.addVote(vote, peerID) +func (cs *State) tryAddVote(ctx context.Context, vote *types.Vote, peerID types.NodeID) (bool, error) { + added, err := cs.addVote(ctx, vote, peerID) if err != nil { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. 
@@ -1975,7 +2149,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) } if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) { - cs.Logger.Error( + cs.logger.Error( "found conflicting vote from ourselves; did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, @@ -1987,7 +2161,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) // report conflicting votes to the evidence pool cs.evpool.ReportConflictingVotes(voteErr.VoteA, voteErr.VoteB) - cs.Logger.Debug( + cs.logger.Debug( "found and sent conflicting votes to the evidence pool", "vote_a", voteErr.VoteA, "vote_b", voteErr.VoteB, @@ -1995,14 +2169,14 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) return added, err } else if errors.Is(err, types.ErrVoteNonDeterministicSignature) { - cs.Logger.Debug("vote has non-deterministic signature", "err", err) + cs.logger.Debug("vote has non-deterministic signature", "err", err) } else { // Either // 1) bad peer OR // 2) not a bad peer? this can also err sometimes with "Unexpected step" OR // 3) tmkms use with multiple validators connecting to a single tmkms instance - // (https://github.com/tendermint/tendermint/issues/3839). - cs.Logger.Info("failed attempting to add vote", "err", err) + // (https://github.com/tendermint/tendermint/issues/3839). 
+ cs.logger.Info("failed attempting to add vote", "err", err) return added, ErrAddingVote } } @@ -2010,8 +2184,12 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) return added, nil } -func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { - cs.Logger.Debug( +func (cs *State) addVote( + ctx context.Context, + vote *types.Vote, + peerID types.NodeID, +) (added bool, err error) { + cs.logger.Debug( "adding vote", "vote_height", vote.Height, "vote_type", vote.Type, @@ -2024,7 +2202,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType { if cs.Step != cstypes.RoundStepNewHeight { // Late precommit at prior height is ignored - cs.Logger.Debug("precommit vote came in after commit timeout and has been ignored", "vote", vote) + cs.logger.Debug("precommit vote came in after commit timeout and has been ignored", "vote", vote) return } @@ -2033,18 +2211,18 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err return } - cs.Logger.Debug("added vote to last precommits", "last_commit", cs.LastCommit.StringShort()) - if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { + cs.logger.Debug("added vote to last precommits", "last_commit", cs.LastCommit.StringShort()) + if err := cs.eventBus.PublishEventVote(ctx, types.EventDataVote{Vote: vote}); err != nil { return added, err } - cs.evsw.FireEvent(types.EventVoteValue, vote) + cs.evsw.FireEvent(ctx, types.EventVoteValue, vote) // if we can skip timeoutCommit and have all the votes now, if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { // go straight to new round (skip timeout commit) // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) - cs.enterNewRound(cs.Height, 0) + cs.enterNewRound(ctx, cs.Height, 0) } return @@ -2053,10 +2231,17 @@ func (cs *State) addVote(vote 
*types.Vote, peerID types.NodeID) (added bool, err // Height mismatch is ignored. // Not necessarily a bad peer, but not favorable behavior. if vote.Height != cs.Height { - cs.Logger.Debug("vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) + cs.logger.Debug("vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) return } + // Verify VoteExtension if precommit + if vote.Type == tmproto.PrecommitType { + if err = cs.blockExec.VerifyVoteExtension(vote); err != nil { + return false, err + } + } + height := cs.Height added, err = cs.Votes.AddVote(vote, peerID) if !added { @@ -2064,50 +2249,30 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err return } - if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { + if err := cs.eventBus.PublishEventVote(ctx, types.EventDataVote{Vote: vote}); err != nil { return added, err } - cs.evsw.FireEvent(types.EventVoteValue, vote) + cs.evsw.FireEvent(ctx, types.EventVoteValue, vote) switch vote.Type { case tmproto.PrevoteType: prevotes := cs.Votes.Prevotes(vote.Round) - cs.Logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort()) - - // If +2/3 prevotes for a block or nil for *any* round: - if blockID, ok := prevotes.TwoThirdsMajority(); ok { - // There was a polka! - // If we're locked but this is a recent polka, unlock. 
- // If it matches our ProposalBlock, update the ValidBlock - - // Unlock if `cs.LockedRound < vote.Round <= cs.Round` - // NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round - if (cs.LockedBlock != nil) && - (cs.LockedRound < vote.Round) && - (vote.Round <= cs.Round) && - !cs.LockedBlock.HashesTo(blockID.Hash) { - - cs.Logger.Debug("unlocking because of POL", "locked_round", cs.LockedRound, "pol_round", vote.Round) + cs.logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort()) - cs.LockedRound = -1 - cs.LockedBlock = nil - cs.LockedBlockParts = nil - - if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { - return added, err - } - } + // Check to see if >2/3 of the voting power on the network voted for any non-nil block. + if blockID, ok := prevotes.TwoThirdsMajority(); ok && !blockID.IsNil() { + // Greater than 2/3 of the voting power on the network voted for some + // non-nil block // Update Valid* if we can. - // NOTE: our proposal block may be nil or not what received a polka.. 
- if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { + if cs.ValidRound < vote.Round && vote.Round == cs.Round { if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.Logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round) + cs.logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round) cs.ValidRound = vote.Round cs.ValidBlock = cs.ProposalBlock cs.ValidBlockParts = cs.ProposalBlockParts } else { - cs.Logger.Debug( + cs.logger.Debug( "valid block we do not know about; set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "block_id", blockID.Hash, @@ -2121,8 +2286,8 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } - cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState) - if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { + cs.evsw.FireEvent(ctx, types.EventValidBlockValue, &cs.RoundState) + if err := cs.eventBus.PublishEventValidBlock(ctx, cs.RoundStateEvent()); err != nil { return added, err } } @@ -2132,26 +2297,26 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err switch { case cs.Round < vote.Round && prevotes.HasTwoThirdsAny(): // Round-skip if there is any 2/3+ of votes ahead of us - cs.enterNewRound(height, vote.Round) + cs.enterNewRound(ctx, height, vote.Round) case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round blockID, ok := prevotes.TwoThirdsMajority() - if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) { - cs.enterPrecommit(height, vote.Round) + if ok && (cs.isProposalComplete() || blockID.IsNil()) { + cs.enterPrecommit(ctx, height, vote.Round) } else if prevotes.HasTwoThirdsAny() { - cs.enterPrevoteWait(height, vote.Round) + cs.enterPrevoteWait(ctx, height, vote.Round) } case cs.Proposal != nil 
&& 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round: // If the proposal is now complete, enter prevote of cs.Round. if cs.isProposalComplete() { - cs.enterPrevote(height, cs.Round) + cs.enterPrevote(ctx, height, cs.Round) } } case tmproto.PrecommitType: precommits := cs.Votes.Precommits(vote.Round) - cs.Logger.Debug("added vote to precommit", + cs.logger.Debug("added vote to precommit", "height", vote.Height, "round", vote.Round, "validator", vote.ValidatorAddress.String(), @@ -2161,20 +2326,20 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err blockID, ok := precommits.TwoThirdsMajority() if ok { // Executed as TwoThirdsMajority could be from a higher round - cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) + cs.enterNewRound(ctx, height, vote.Round) + cs.enterPrecommit(ctx, height, vote.Round) - if len(blockID.Hash) != 0 { - cs.enterCommit(height, vote.Round) + if !blockID.IsNil() { + cs.enterCommit(ctx, height, vote.Round) if cs.config.SkipTimeoutCommit && precommits.HasAll() { - cs.enterNewRound(cs.Height, 0) + cs.enterNewRound(ctx, cs.Height, 0) } } else { - cs.enterPrecommitWait(height, vote.Round) + cs.enterPrecommitWait(ctx, height, vote.Round) } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { - cs.enterNewRound(height, vote.Round) - cs.enterPrecommitWait(height, vote.Round) + cs.enterNewRound(ctx, height, vote.Round) + cs.enterPrecommitWait(ctx, height, vote.Round) } default: @@ -2186,6 +2351,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err // CONTRACT: cs.privValidator is not nil. 
func (cs *State) signVote( + ctx context.Context, msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, @@ -2208,13 +2374,11 @@ func (cs *State) signVote( ValidatorIndex: valIdx, Height: cs.Height, Round: cs.Round, - Timestamp: cs.voteTime(), + Timestamp: tmtime.Now(), Type: msgType, BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } - v := vote.ToProto() - // If the signedMessageType is for precommit, // use our local precommit Timeout as the max wait time for getting a singed commit. The same goes for prevote. var timeout time.Duration @@ -2222,55 +2386,44 @@ func (cs *State) signVote( switch msgType { case tmproto.PrecommitType: timeout = cs.config.TimeoutPrecommit + // if the signedMessage type is for a precommit, add VoteExtension + ext, err := cs.blockExec.ExtendVote(vote) + if err != nil { + return nil, err + } + vote.VoteExtension = ext case tmproto.PrevoteType: timeout = cs.config.TimeoutPrevote default: timeout = time.Second } - ctx, cancel := context.WithTimeout(context.TODO(), timeout) + v := vote.ToProto() + + ctxto, cancel := context.WithTimeout(ctx, timeout) defer cancel() - err := cs.privValidator.SignVote(ctx, cs.state.ChainID, v) + err := cs.privValidator.SignVote(ctxto, cs.state.ChainID, v) vote.Signature = v.Signature vote.Timestamp = v.Timestamp return vote, err } -// voteTime ensures monotonicity of the time a validator votes on. -// It ensures that for a prior block with a BFT-timestamp of T, -// any vote from this validator will have time at least time T + 1ms. -// This is needed, as monotonicity of time is a guarantee that BFT time provides. -func (cs *State) voteTime() time.Time { - now := tmtime.Now() - minVoteTime := now - // Minimum time increment between blocks - const timeIota = time.Millisecond - // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, - // even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/. 
- if cs.LockedBlock != nil { - // See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html - minVoteTime = cs.LockedBlock.Time.Add(timeIota) - } else if cs.ProposalBlock != nil { - minVoteTime = cs.ProposalBlock.Time.Add(timeIota) - } - - if now.After(minVoteTime) { - return now - } - return minVoteTime -} - // sign the vote and publish on internalMsgQueue -func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { +func (cs *State) signAddVote( + ctx context.Context, + msgType tmproto.SignedMsgType, + hash []byte, + header types.PartSetHeader, +) *types.Vote { if cs.privValidator == nil { // the node does not have a key return nil } if cs.privValidatorPubKey == nil { // Vote won't be signed, but it's not critical. - cs.Logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet)) + cs.logger.Error("signAddVote", "err", errPubKeyIsNotSet) return nil } @@ -2280,21 +2433,21 @@ func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header } // TODO: pass pubKey to signVote - vote, err := cs.signVote(msgType, hash, header) + vote, err := cs.signVote(ctx, msgType, hash, header) if err == nil { - cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) - cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote) + cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, "", tmtime.Now()}) + cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote) return vote } - cs.Logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) + cs.logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) return nil } // updatePrivValidatorPubKey get's the private validator public key and // memoizes it. This func returns an error if the private validator is not // responding or responds with an error. 
-func (cs *State) updatePrivValidatorPubKey() error { +func (cs *State) updatePrivValidatorPubKey(rctx context.Context) error { if cs.privValidator == nil { return nil } @@ -2313,9 +2466,9 @@ func (cs *State) updatePrivValidatorPubKey() error { // set context timeout depending on the configuration and the State step, // this helps in avoiding blocking of the remote signer connection. - ctx, cancel := context.WithTimeout(context.TODO(), timeout) + ctxto, cancel := context.WithTimeout(rctx, timeout) defer cancel() - pubKey, err := cs.privValidator.GetPubKey(ctx) + pubKey, err := cs.privValidator.GetPubKey(ctxto) if err != nil { return err } @@ -2337,7 +2490,7 @@ func (cs *State) checkDoubleSigningRisk(height int64) error { if lastCommit != nil { for sigIdx, s := range lastCommit.Signatures { if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) { - cs.Logger.Info("found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i) + cs.logger.Info("found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i) return ErrSignatureFoundInPastBlocks } } @@ -2348,6 +2501,31 @@ func (cs *State) checkDoubleSigningRisk(height int64) error { return nil } +func (cs *State) calculatePrevoteMessageDelayMetrics() { + if cs.Proposal == nil { + return + } + ps := cs.Votes.Prevotes(cs.Round) + pl := ps.List() + + sort.Slice(pl, func(i, j int) bool { + return pl[i].Timestamp.Before(pl[j].Timestamp) + }) + + var votingPowerSeen int64 + for _, v := range pl { + _, val := cs.Validators.GetByAddress(v.ValidatorAddress) + votingPowerSeen += val.VotingPower + if votingPowerSeen >= cs.Validators.TotalVotingPower()*2/3+1 { + cs.metrics.QuorumPrevoteMessageDelay.Set(v.Timestamp.Sub(cs.Proposal.Timestamp).Seconds()) + break + } + } + if ps.HasAll() { + cs.metrics.FullPrevoteMessageDelay.Set(pl[len(pl)-1].Timestamp.Sub(cs.Proposal.Timestamp).Seconds()) + } +} + //--------------------------------------------------------- func 
CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2 cstypes.RoundStepType) int { @@ -2404,3 +2582,30 @@ func repairWalFile(src, dst string) error { return nil } + +func (cs *State) calculateProposalTimestampDifferenceMetric() { + if cs.Proposal != nil && cs.Proposal.POLRound == -1 { + tp := types.SynchronyParams{ + Precision: cs.state.ConsensusParams.Synchrony.Precision, + MessageDelay: cs.state.ConsensusParams.Synchrony.MessageDelay, + } + + isTimely := cs.Proposal.IsTimely(cs.ProposalReceiveTime, tp, cs.Round) + cs.metrics.ProposalTimestampDifference.With("is_timely", fmt.Sprintf("%t", isTimely)). + Observe(cs.ProposalReceiveTime.Sub(cs.Proposal.Timestamp).Seconds()) + } +} + +// proposerWaitTime determines how long the proposer should wait to propose its next block. +// If the result is zero, a block can be proposed immediately. +// +// Block times must be monotonically increasing, so if the block time of the previous +// block is larger than the proposer's current time, then the proposer will sleep +// until its local clock exceeds the previous block time. 
+func proposerWaitTime(lt tmtime.Source, bt time.Time) time.Duration { + t := lt.Now() + if bt.After(t) { + return bt.Sub(t) + } + return 0 +} diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index a1db8276db..78f2ad0a52 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -13,9 +13,12 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/crypto/tmhash" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" + tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -34,16 +37,23 @@ x * TestFullRound1 - 1 val, full successful round x * TestFullRoundNil - 1 val, full round of nil x * TestFullRound2 - 2 vals, both required for full round LockSuite -x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first. -x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka -x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precomit nil -x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round -x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round +x * TestStateLock_NoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first. 
+x * TestStateLock_POLUpdateLock - 4 vals, one precommits, +other 3 polka at next round, so we unlock and precomit the polka +x * TestStateLock_POLRelock - 4 vals, polka in round 1 and polka in round 2. +Ensure validator updates locked round. +x_*_TestStateLock_POLDoesNotUnlock 4 vals, one precommits, other 3 polka nil at +next round, so we precommit nil but maintain lock +x * TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock - 4 vals, 1 misses proposal but sees POL. +x * TestStateLock_MissingProposalWhenPOLSeenDoesNotUnlock - 4 vals, 1 misses proposal but sees POL. +x * TestStateLock_POLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round +x * TestStateLock_POLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round +x_*_TestState_PrevotePOLFromPreviousRound 4 vals, prevote a proposal if a POL was seen for it in a previous round. * TestNetworkLock - once +1/3 precommits, network should be locked * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed SlashingSuite -x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed -x * TestSlashingPrecommits - a validator precomitting twice in a round gets slashed +x * TestStateSlashing_Prevotes - a validator prevoting twice in a round gets slashed +x * TestStateSlashing_Precommits - a validator precomitting twice in a round gets slashed CatchupSuite * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote HaltSuite @@ -55,80 +65,82 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh // ProposeSuite func TestStateProposerSelection0(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) + logger := log.NewNopLogger() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) height, round := cs1.Height, cs1.Round - newRoundCh := 
subscribe(cs1.eventBus, types.EventQueryNewRound) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // Wait for new round so proposer is set. - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() - pv, err := cs1.privValidator.GetPubKey(context.Background()) + pv, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) address := pv.Address() - if !bytes.Equal(prop.Address, address) { - t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) - } + require.Truef(t, bytes.Equal(prop.Address, address), "expected proposer to be validator %d. Got %X", 0, prop.Address) // Wait for complete proposal. - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - signAddVotes(config, cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + }, vss[1:]...) // Wait for new round so next validator is set. - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) prop = cs1.GetRoundState().Validators.GetProposer() - pv1, err := vss[1].GetPubKey(context.Background()) + pv1, err := vss[1].GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - if !bytes.Equal(prop.Address, addr) { - panic(fmt.Sprintf("expected proposer to be validator %d. 
Got %X", 1, prop.Address)) - } + require.True(t, bytes.Equal(prop.Address, addr), "expected proposer to be validator %d. Got %X", 1, prop.Address) } // Now let's do it all again, but starting from round 2 instead of 0 func TestStateProposerSelection2(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() - cs1, vss := randState(config, 4) // test needs more work for more than 3 validators + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cs1, vss := makeState(ctx, t, config, logger, 4) // test needs more work for more than 3 validators height := cs1.Height - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) incrementRound(vss[1:]...) var round int32 = 2 - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) - ensureNewRound(newRoundCh, height, round) // wait for the new round + ensureNewRound(t, newRoundCh, height, round) // wait for the new round // everyone just votes nil. we get a new proposer each round for i := int32(0); int(i) < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - pvk, err := vss[int(i+round)%len(vss)].GetPubKey(context.Background()) + pvk, err := vss[int(i+round)%len(vss)].GetPubKey(ctx) require.NoError(t, err) addr := pvk.Address() correctProposer := addr - if !bytes.Equal(prop.Address, correctProposer) { - panic(fmt.Sprintf( - "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", - int(i+2)%len(vss), - prop.Address)) - } + require.True(t, bytes.Equal(prop.Address, correctProposer), + "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", + int(i+2)%len(vss), + prop.Address) - rs := cs1.GetRoundState() - signAddVotes(config, cs1, tmproto.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...) 
- ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vss[1:]...) + ensureNewRound(t, newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -137,18 +149,21 @@ func TestStateProposerSelection2(t *testing.T) { // a non-validator should timeout into the prevote round func TestStateEnterProposeNoPrivValidator(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _ := randState(config, 1) - cs.SetPrivValidator(nil) + cs, _ := makeState(ctx, t, config, logger, 1) + cs.SetPrivValidator(ctx, nil) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) // if we're not a validator, EnterPropose should timeout - ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) if cs.GetRoundState().Proposal != nil { t.Error("Expected to make no proposal, since no privValidator") @@ -158,19 +173,22 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { // a validator should not timeout of the prevote round (TODO: unless the block is really big!) 
func TestStateEnterProposeYesPrivValidator(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _ := randState(config, 1) + cs, _ := makeState(ctx, t, config, logger, 1) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) - cs.enterNewRound(height, round) - cs.startRoutines(3) + cs.enterNewRound(ctx, height, round) + cs.startRoutines(ctx, 3) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) // Check that Proposal, ProposalBlock, ProposalBlockParts are set. rs := cs.GetRoundState() @@ -185,22 +203,26 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { } // if we're a validator, enterPropose should not timeout - ensureNoNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) + ensureNoNewTimeout(t, timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) } func TestStateBadProposal(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 2) + cs1, vss := makeState(ctx, t, config, logger, 2) height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) - propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) + propBlock, _, err := cs1.createProposalBlock() 
// changeProposer(t, cs1, vs2) + require.NoError(t, err) // make the second validator the proposer by incrementing round round++ @@ -213,55 +235,57 @@ func TestStateBadProposal(t *testing.T) { } stateHash[0] = (stateHash[0] + 1) % 255 propBlock.AppHash = stateHash - propBlockParts := propBlock.MakePartSet(partSize) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vs2.Height, round, -1, blockID) + proposal := types.NewProposal(vs2.Height, round, -1, blockID, propBlock.Header.Time) p := proposal.ToProto() - if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } + err = vs2.SignProposal(ctx, config.ChainID(), p) + require.NoError(t, err) proposal.Signature = p.Signature // set the proposal block - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer") + require.NoError(t, err) // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // wait for proposal - ensureProposal(proposalCh, height, round, blockID) + ensureProposal(t, proposalCh, height, round, blockID) // wait for prevote - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) // add bad prevote from vs2 and wait for it - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + ensurePrevote(t, voteCh, height, round) // wait for precommit - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - 
signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) } func TestStateOversizedBlock(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 2) + cs1, vss := makeState(ctx, t, config, logger, 2) cs1.state.ConsensusParams.Block.MaxBytes = 2000 height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) - propBlock, _ := cs1.createProposalBlock() + propBlock, _, err := cs1.createProposalBlock() + require.NoError(t, err) propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} propBlock.Header.DataHash = propBlock.Data.Hash() @@ -269,13 +293,13 @@ func TestStateOversizedBlock(t *testing.T) { round++ incrementRound(vss[1:]...) 
- propBlockParts := propBlock.MakePartSet(partSize) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) + proposal := types.NewProposal(height, round, -1, blockID, propBlock.Header.Time) p := proposal.ToProto() - if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } + err = vs2.SignProposal(ctx, config.ChainID(), p) + require.NoError(t, err) proposal.Signature = p.Signature totalBytes := 0 @@ -284,28 +308,26 @@ func TestStateOversizedBlock(t *testing.T) { totalBytes += len(part.Bytes) } - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer") + require.NoError(t, err) // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) // c1 should log an error with the block part message as it exceeds the consensus params. The // block is not added to cs.ProposalBlock so the node timeouts. 
- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) // and then should send nil prevote and precommit regardless of whether other validators prevote and // precommit on it - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevoteMatch(t, voteCh, height, round, nil) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + ensurePrevote(t, voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) } //---------------------------------------------------------------------------------------------------- @@ -314,101 +336,93 @@ func TestStateOversizedBlock(t *testing.T) { // propose, prevote, and precommit a block func TestStateFullRound1(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss := randState(config, 1) + cs, vss := makeState(ctx, t, config, logger, 1) height, round := cs.Height, cs.Round - // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit - // before consensus can move to the next height (and cause a race condition) - if err := cs.eventBus.Stop(); err != nil { - t.Error(err) - } - eventBus := types.NewEventBusWithBufferCapacity(0) - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - 
cs.SetEventBus(eventBus) - if err := eventBus.Start(); err != nil { - t.Error(err) - } - - voteCh := subscribe(cs.eventBus, types.EventQueryVote) - propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) + propCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound) // Maybe it would be better to call explicitly startRoutines(4) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(propCh, height, round) - propBlockHash := cs.GetRoundState().ProposalBlock.Hash() + propBlock := ensureNewProposal(t, propCh, height, round) - ensurePrevote(voteCh, height, round) // wait for prevote - validatePrevote(t, cs, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash) // wait for prevote - ensurePrecommit(voteCh, height, round) // wait for precommit + ensurePrecommit(t, voteCh, height, round) // wait for precommit // we're going to roll right into new height - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) - validateLastPrecommit(t, cs, vss[0], propBlockHash) + validateLastPrecommit(ctx, t, cs, vss[0], propBlock.Hash) } // nil is proposed, so prevote and precommit nil func TestStateFullRoundNil(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss := randState(config, 1) + cs, _ := makeState(ctx, t, config, logger, 1) height, round := cs.Height, cs.Round - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) - cs.enterPrevote(height, round) - cs.startRoutines(4) + 
cs.enterPrevote(ctx, height, round) + cs.startRoutines(ctx, 4) - ensurePrevote(voteCh, height, round) // prevote - ensurePrecommit(voteCh, height, round) // precommit - - // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) // prevote + ensurePrecommitMatch(t, voteCh, height, round, nil) // precommit } // run through propose, prevote, precommit commit with two validators // where the first validator has to wait for votes from the second func TestStateFullRound2(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 2) + cs1, vss := makeState(ctx, t, config, logger, 2) vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) - ensurePrevote(voteCh, height, round) // prevote + ensurePrevote(t, voteCh, height, round) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() - propBlockHash, propPartSetHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() + blockID := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} // prevote arrives from vs2: - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propPartSetHeader, vs2) - ensurePrevote(voteCh, height, round) // prevote + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + ensurePrevote(t, voteCh, height, round) // prevote - ensurePrecommit(voteCh, height, round) // precommit + 
ensurePrecommit(t, voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) + validatePrecommit(ctx, t, cs1, 0, 0, vss[0], blockID.Hash, blockID.Hash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propPartSetHeader, vs2) - ensurePrecommit(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) + ensurePrecommit(t, voteCh, height, round) // wait to finish commit, propose in next height - ensureNewBlock(newBlockCh, height) + ensureNewBlock(t, newBlockCh, height) } //------------------------------------------------------------------------------------------ @@ -416,63 +430,72 @@ func TestStateFullRound2(t *testing.T) { // two validators, 4 rounds. // two vals take turns proposing. val1 locks on first one, precommits nil on everything else -func TestStateLockNoPOL(t *testing.T) { +func TestStateLock_NoPOL(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 2) + cs1, vss := makeState(ctx, t, config, logger, 2) vs2 := vss[1] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + 
proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 */ // start round and wait for prevote - cs1.enterNewRound(height, round) - cs1.startRoutines(0) + cs1.enterNewRound(ctx, height, round) + cs1.startRoutines(ctx, 0) - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) roundState := cs1.GetRoundState() - theBlockHash := roundState.ProposalBlock.Hash() - thePartSetHeader := roundState.ProposalBlockParts.Header() + initialBlockID := types.BlockID{ + Hash: roundState.ProposalBlock.Hash(), + PartSetHeader: roundState.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) // prevote + ensurePrevote(t, voteCh, height, round) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, thePartSetHeader, vs2) - ensurePrevote(voteCh, height, round) // prevote + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2) + ensurePrevote(t, voteCh, height, round) // prevote + validatePrevote(ctx, t, cs1, round, vss[0], initialBlockID.Hash) - ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one for a different block - hash := make([]byte, len(theBlockHash)) - copy(hash, theBlockHash) + hash := make([]byte, len(initialBlockID.Hash)) + copy(hash, initialBlockID.Hash) hash[0] = (hash[0] + 1) % 
255 - signAddVotes(config, cs1, tmproto.PrecommitType, hash, thePartSetHeader, vs2) - ensurePrecommit(voteCh, height, round) // precommit + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ + Hash: hash, + PartSetHeader: initialBlockID.PartSetHeader, + }, vs2) + ensurePrecommit(t, voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) /// round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) t.Log("#### ONTO ROUND 1") /* Round2 (cs1, B) // B B2 @@ -481,42 +504,41 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) // now we're on a new round and not the proposer, so wait for timeout - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) rs := cs1.GetRoundState() - if rs.ProposalBlock != nil { - panic("Expected proposal block to be nil") - } + require.Nil(t, rs.ProposalBlock, "Expected proposal block to be nil") - // wait to finish prevote - ensurePrevote(voteCh, height, round) - // we should have prevoted our locked block - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + // we should have prevoted nil since we did not see a proposal in the round. 
+ ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // add a conflicting prevote from the other validator - signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + partSet, err := rs.LockedBlock.MakePartSet(partSize) + require.NoError(t, err) + conflictingBlockID := types.BlockID{Hash: hash, PartSetHeader: partSet.Header()} + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), conflictingBlockID, vs2) + ensurePrevote(t, voteCh, height, round) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. then wait for precommit - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - - ensurePrecommit(voteCh, height, round) // precommit - // the proposed block should still be locked and our precommit added - // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + // the proposed block should still be locked block. + // we should precommit nil and be locked on the proposal. 
+ ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // add conflicting precommit from vs2 - signAddVotes(config, cs1, tmproto.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensurePrecommit(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), conflictingBlockID, vs2) + ensurePrecommit(t, voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // entering new round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) t.Log("#### ONTO ROUND 2") /* Round3 (vs2, _) // B, B2 @@ -524,50 +546,54 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = cs1.GetRoundState() // now we're on a new round and are the proposer - if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { - panic(fmt.Sprintf( - "Expected proposal block to be locked block. Got %v, Expected %v", - rs.ProposalBlock, - rs.LockedBlock)) - } - - ensurePrevote(voteCh, height, round) // prevote - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) - - signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + require.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()), + "Expected proposal block to be locked block. 
Got %v, Expected %v", + rs.ProposalBlock, + rs.LockedBlock) + + ensurePrevote(t, voteCh, height, round) // prevote + validatePrevote(ctx, t, cs1, round, vss[0], rs.LockedBlock.Hash()) + partSet, err = rs.ProposalBlock.MakePartSet(partSize) + require.NoError(t, err) + newBlockID := types.BlockID{Hash: hash, PartSetHeader: partSet.Header()} + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), newBlockID, vs2) + ensurePrevote(t, voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) // precommit + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + ensurePrecommit(t, voteCh, height, round) // precommit - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but be locked on proposal signAddVotes( - config, + ctx, + t, cs1, tmproto.PrecommitType, - hash, - rs.ProposalBlock.MakePartSet(partSize).Header(), + config.ChainID(), + newBlockID, vs2) // NOTE: conflicting precommits at same height - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - cs2, _ := randState(config, 2) // needed so generated block is different than locked block + cs2, _ := makeState(ctx, t, config, logger, 2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round+1) + require.NotNil(t, 
propBlock, "Failed to create proposal block with vs2") + require.NotNil(t, prop, "Failed to create proposal block with vs2") + propBlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), } incrementRound(vs2) round++ // entering new round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) t.Log("#### ONTO ROUND 3") /* Round4 (vs2, C) // B C // B C @@ -575,449 +601,839 @@ func TestStateLockNoPOL(t *testing.T) { // now we're on a new round and not the proposer // so set the proposal block - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { - t.Fatal(err) - } + bps3, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, bps3, "") + require.NoError(t, err) + + ensureNewProposal(t, proposalCh, height, round) - ensureNewProposal(proposalCh, height, round) - ensurePrevote(voteCh, height, round) // prevote - // prevote for locked block (not proposal) - validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) + // prevote for nil since we did not see a proposal for our locked block in the round. 
+ ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, 3, vss[0], nil) // prevote for proposed block - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), propBlockID, vs2) + ensurePrevote(t, voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but locked on proposal signAddVotes( - config, + ctx, + t, cs1, tmproto.PrecommitType, - propBlock.Hash(), - propBlock.MakePartSet(partSize).Header(), + config.ChainID(), + propBlockID, vs2) // NOTE: conflicting precommits at same height - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) } -// 4 vals in two rounds, -// in round one: v1 precommits, other 3 only prevote so the block isn't committed -// in round two: v1 prevotes the same block that the node is locked on -// the others prevote a new block hence v1 changes lock and precommits the new block with the others -func TestStateLockPOLRelock(t *testing.T) { +// TestStateLock_POLUpdateLock tests that a validator updates its locked +// block if the following conditions are met within a round: +// 1. The validator received a valid proposal for the block +// 2. The validator received prevotes representing greater than 2/3 of the voting +// power on the network for the block. 
+func TestStateLock_POLUpdateLock(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - - // everything done from perspective of cs1 + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* - Round1 (cs1, B) // B B B B// B nil B nil + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. - eg. vs2 and vs4 didn't see the 2/3 prevotes + This ensures that cs1 will lock on B in this round but not precommit it. 
*/ + t.Log("### Starting Round 0") // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) - ensureNewRound(newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + initialBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) // prevote + ensurePrevote(t, voteCh, height, round) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) // our precommit - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) - // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) - // before we timeout to the new round set the new proposal - cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") - } - propBlockParts := propBlock.MakePartSet(partSize) - propBlockHash := propBlock.Hash() - require.NotEqual(t, propBlockHash, theBlockHash) + // add precommits from the rest of the validators. 
+ signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + /* + Round 1: + Create a block, D and send a proposal for it to cs1 + Send a prevote for D from each of the validators to cs1. + Send a precommit for nil from all of the validtors to cs1. + + Check that cs1 is now locked on the new block, D and no longer on the old block. + */ + t.Log("### Starting Round 1") incrementRound(vs2, vs3, vs4) + round++ - // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + // Generate a new proposal block. + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + require.NoError(t, err) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockR1Parts, err := propBlockR1.MakePartSet(partSize) + require.NoError(t, err) + propBlockR1Hash := propBlockR1.Hash() + r1BlockID := types.BlockID{ + Hash: propBlockR1Hash, + PartSetHeader: propBlockR1Parts.Header(), + } + require.NotEqual(t, propBlockR1Hash, initialBlockID.Hash) + err = cs1.SetProposalAndBlock(ctx, propR1, propBlockR1, propBlockR1Parts, "some peer") + require.NoError(t, err) - round++ // moving to the next round - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) + ensureNewRound(t, newRoundCh, height, round) + + // ensure that the validator receives the proposal. + ensureNewProposal(t, proposalCh, height, round) + + // Prevote our nil since the proposal does not match our locked block. + ensurePrevoteMatch(t, voteCh, height, round, nil) + + // Add prevotes from the remainder of the validators for the new locked block. 
+ signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) + + // Check that we lock on a new block. + ensureLock(t, lockCh, height, round) + + ensurePrecommit(t, voteCh, height, round) + + // We should now be locked on the new block and prevote it since we saw a sufficient amount + // prevote for the block. + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockR1Hash, propBlockR1Hash) +} + +// TestStateLock_POLRelock tests that a validator updates its locked round if +// it receives votes representing over 2/3 of the voting power on the network +// for a block that it is already locked in. +func TestStateLock_POLRelock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + logger := log.NewNopLogger() + + cs1, vss := makeState(ctx, t, config, logger, 4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + relockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryRelock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + This ensures that cs1 will lock on B in this round but not precommit it. 
+ */ + t.Log("### Starting Round 0") + + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + theBlock := rs.ProposalBlock + theBlockParts := rs.ProposalBlockParts + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 1") + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) + + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) /* - Round2 (vs2, C) // B C C C // C C C _) + Round 1: + Create a proposal for block B, the same block from round 1. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validtors to cs1. - cs1 changes lock! + Check that cs1 updates its 'locked round' value to the current round. 
*/ + t.Log("### Starting Round 1") + incrementRound(vs2, vs3, vs4) + round++ + propR1 := types.NewProposal(height, round, cs1.ValidRound, blockID, theBlock.Header.Time) + p := propR1.ToProto() + err = vs2.SignProposal(ctx, cs1.state.ChainID, p) + require.NoError(t, err) + propR1.Signature = p.Signature + err = cs1.SetProposalAndBlock(ctx, propR1, theBlock, theBlockParts, "") + require.NoError(t, err) - // now we're on a new round and not the proposer - // but we should receive the proposal - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - // go to prevote, node should prevote for locked block (not the new proposal) - this is relocking - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + // ensure that the validator receives the proposal. + ensureNewProposal(t, proposalCh, height, round) - // now lets add prevotes from everyone else for the new block - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + // Prevote our locked block since it matches the proposal seen in this round. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) - ensurePrecommit(voteCh, height, round) - // we should have unlocked and locked on the new block, sending a precommit for this new block - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + // Add prevotes from the remainder of the validators for the locked block. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - // more prevote creating a majority on the new block and this is then committed - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3) - ensureNewBlockHeader(newBlockCh, height, propBlockHash) + // Check that we relock.
+ ensureRelock(t, relockCh, height, round) - ensureNewRound(newRoundCh, height+1, 0) + ensurePrecommit(t, voteCh, height, round) + + // We should now be locked on the same block but with an updated locked round. + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) } -// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka -func TestStateLockPOLUnlock(t *testing.T) { +// TestStateLock_PrevoteNilWhenLockedAndMissProposal tests that a validator prevotes nil +// if it is locked on a block and misses the proposal in a round. +func TestStateLock_PrevoteNilWhenLockedAndMissProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) + logger := log.NewNopLogger() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - - // everything done from perspective of cs1 + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* - Round1 (cs1, B) // B B B B // B nil B nil - eg. didn't see the 2/3 prevotes + Round 0: + cs1 creates a proposal for block B. 
+ Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it. */ + t.Log("### Starting Round 0") - // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + ensurePrevote(t, voteCh, height, round) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) - // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) + // the proposed block should now be locked and our precommit added. 
+ ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockParts := propBlock.MakePartSet(partSize) + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - rs = cs1.GetRoundState() - lockedBlockHash := rs.LockedBlock.Hash() + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + /* + Round 1: + Send a prevote for nil from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + Check that cs1 prevotes nil instead of its locked block, but ensure + that it maintains its locked block. + */ + t.Log("### Starting Round 1") incrementRound(vs2, vs3, vs4) - round++ // moving to the next round + round++ - ensureNewRound(newRoundCh, height, round) - t.Log("#### ONTO ROUND 1") + ensureNewRound(t, newRoundCh, height, round) + + // Prevote nil. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + + // Add prevotes from the remainder of the validators for nil. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + ensurePrecommit(t, voteCh, height, round) + // We should remain locked on the block from round 0 while precommitting nil. + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) +} + +// TestStateLock_PrevoteNilWhenLockedAndDifferentProposal tests that a validator prevotes nil +// if it is locked on one block and sees a different proposal in a round.
+func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + config := configSetup(t) /* - Round2 (vs2, C) // B nil nil nil // nil nil nil _ - cs1 unlocks! + All of the assertions in this test occur on the `cs1` validator. + The test sends signed votes from the other validators to cs1 and + cs1's state is then examined to verify that it now matches the expected + state. */ - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) + + cs1, vss := makeState(ctx, t, config, logger, 4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(context.Background()) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it. 
+ */ + t.Log("### Starting Round 0") + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - ensureNewProposal(proposalCh, height, round) + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) - // go to prevote, prevote for locked block (not proposal) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], lockedBlockHash) - // now lets add prevotes from everyone else for nil (a polka!) - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - // the polka makes us unlock and precommit nil - ensureNewUnlock(unlockCh, height, round) - ensurePrecommit(voteCh, height, round) + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - // we should have unlocked and committed nil - // NOTE: since we don't relock on nil, the lock round is -1 - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) - ensureNewRound(newRoundCh, height, round+1) + /* + Round 1: + Create a proposal for a new block. + Send a prevote for nil from each of the validators to cs1. + Send a precommit for nil from all of the validtors to cs1. 
+ + Check that cs1 prevotes nil instead of its locked block, but ensure + that it maintains its locked block. + */ + t.Log("### Starting Round 1") + incrementRound(vs2, vs3, vs4) + round++ + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockR1Parts, err := propBlockR1.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + propBlockR1Hash := propBlockR1.Hash() + require.NotEqual(t, propBlockR1Hash, blockID.Hash) + err = cs1.SetProposalAndBlock(ctx, propR1, propBlockR1, propBlockR1Parts, "some peer") + require.NoError(t, err) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + + // Prevote our nil. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + + // Add prevotes from the remainder of the validators for nil. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // We should now be locked on the same block but prevote nil. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) } -// 4 vals, v1 locks on proposed block in the first round but the other validators only prevote -// In the second round, v1 misses the proposal but sees a majority prevote an unknown block so -// v1 should unlock and precommit nil. In the third round another block is proposed, all vals -// prevote and now v1 can lock onto the third block and precommit that -func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { +// TestStateLock_POLDoesNotUnlock tests that a validator maintains its locked block +// despite receiving +2/3 nil prevotes and nil precommits from other validators. +// Tendermint used to 'unlock' its locked block when greater than 2/3 prevotes +// for a nil block were seen. 
This behavior has been removed and this test ensures +// that it has been completely removed. +func TestStateLock_POLDoesNotUnlock(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + /* + All of the assertions in this test occur on the `cs1` validator. + The test sends signed votes from the other validators to cs1 and + cs1's state is then examined to verify that it now matches the expected + state. + */ - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes - - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - // everything done from perspective of cs1 + voteCh := subscribeToVoter(ctx, t, cs1, addr) /* - Round0 (cs1, A) // A A A A// A nil nil nil + Round 0: + Create a block, B + Send a prevote for B from each of the validators to `cs1`. + Send a precommit for B from one of the validtors to `cs1`. + + This ensures that cs1 will lock on B in this round. 
*/ + t.Log("#### ONTO ROUND 0") // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewRound(newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - firstBlockHash := rs.ProposalBlock.Hash() - firstBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) // prevote + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, firstBlockHash, firstBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) // our precommit - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash) + // the validator should have locked a block in this round. + ensureLock(t, lockCh, height, round) - // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + ensurePrecommit(t, voteCh, height, round) + // the proposed block should now be locked and our precommit should be for this locked block.
- // before we timeout to the new round set the new proposal - cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") - } - secondBlockParts := propBlock.MakePartSet(partSize) - secondBlockHash := propBlock.Hash() - require.NotEqual(t, secondBlockHash, firstBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - incrementRound(vs2, vs3, vs4) + // Add precommits from the other validators. + // We only issue 1/2 Precommits for the block in this round. + // This ensures that the validator being tested does not commit the block. + // We do not want the validator to commit the block because we want the test + // to proceed to the next consensus round. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round++ // moving to the next round + /* + Round 1: + Send a prevote for nil from >2/3 of the validators to `cs1`. + Check that cs1 maintains its lock on B but precommits nil. + Send a precommit for nil from >2/3 of the validators to `cs1`.
+ */ + t.Log("#### ONTO ROUND 1") + round++ + incrementRound(vs2, vs3, vs4) + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + require.NotEqual(t, propBlock.Hash(), blockID.Hash) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "") + require.NoError(t, err) - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 1") + ensureNewRound(t, newRoundCh, height, round) + + ensureNewProposal(t, proposalCh, height, round) + + // Prevote for nil since the proposed block does not match our locked block. + ensurePrevoteMatch(t, voteCh, height, round, nil) + + // add >2/3 prevotes for nil from all other validators + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) + + // verify that we haven't update our locked block since the first round + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) /* - Round1 (vs2, B) // A B B B // nil nil nil nil) + Round 2: + The validator cs1 saw >2/3 precommits for nil in the previous round. + Send the validator >2/3 prevotes for nil and ensure that it did not + unlock its block at the end of the previous round. 
*/ + t.Log("#### ONTO ROUND 2") + round++ + incrementRound(vs2, vs3, vs4) + cs3 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + prop, propBlock = decideProposal(ctx, t, cs3, vs3, vs3.Height, vs3.Round) + propBlockParts, err = propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "") + require.NoError(t, err) - // now we're on a new round but v1 misses the proposal + ensureNewRound(t, newRoundCh, height, round) - // go to prevote, node should prevote for locked block (not the new proposal) - this is relocking - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], firstBlockHash) + ensureNewProposal(t, proposalCh, height, round) - // now lets add prevotes from everyone else for the new block - signAddVotes(config, cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4) + // Prevote for nil since the proposal does not match our locked block. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - ensurePrecommit(voteCh, height, round) - // we should have unlocked and locked on the new block, sending a precommit for this new block - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - if err := cs1.SetProposalAndBlock(prop, propBlock, secondBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + ensurePrecommit(t, voteCh, height, round) + + // verify that we haven't updated our locked block since the first round + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) + +} + +// TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock tests that observing +// a two thirds majority for a block does not cause a validator to update its lock on the +// new block if a proposal was not seen for that block.
+func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { + config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, config, logger, 4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. - // more prevote creating a majority on the new block and this is then committed - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + This ensures that cs1 will lock on B in this round but not precommit it. 
+ */ + t.Log("### Starting Round 0") + startTestRound(ctx, cs1, height, round) - // before we timeout to the new round set the new proposal - cs3 := newState(cs1.state, vs3, kvstore.NewApplication()) - prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + firstBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - thirdPropBlockParts := propBlock.MakePartSet(partSize) - thirdPropBlockHash := propBlock.Hash() - require.NotEqual(t, secondBlockHash, thirdPropBlockHash) - incrementRound(vs2, vs3, vs4) + ensurePrevote(t, voteCh, height, round) // prevote + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) // our precommit + // the proposed block should now be locked and our precommit added + validatePrecommit(ctx, t, cs1, round, round, vss[0], firstBlockID.Hash, firstBlockID.Hash) + + // add precommits from the rest + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 2") + /* + Round 1: + Create a new block, D but do not send it to cs1. + Send a prevote for D from each of the validators to cs1. + Check that cs1 does not update its locked block to this missed block D. 
+ */ + t.Log("### Starting Round 1") + incrementRound(vs2, vs3, vs4) + round++ + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + require.NoError(t, err) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + require.NotNil(t, propBlock, "Failed to create proposal block with vs2") + require.NotNil(t, prop, "Failed to create proposal block with vs2") + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + secondBlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } + require.NotEqual(t, secondBlockID.Hash, firstBlockID.Hash) + + ensureNewRound(t, newRoundCh, height, round) + + // prevote for nil since the proposal was not seen. + ensurePrevoteMatch(t, voteCh, height, round, nil) + + // now lets add prevotes from everyone else for the new block + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), secondBlockID, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, firstBlockID.Hash) +} + +// TestStateLock_DoesNotLockOnOldProposal tests that observing +// a two thirds majority for a block does not cause a validator to lock on the +// block if a proposal was not seen for that block in the current round, but +// was seen in a previous round. 
+func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + logger := log.NewNopLogger() + + cs1, vss := makeState(ctx, t, config, logger, 4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(context.Background()) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* - Round2 (vs3, C) // C C C C // C nil nil nil) + Round 0: + cs1 creates a proposal for block B. + Send a prevote for nil from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will not lock on B. */ + t.Log("### Starting Round 0") + startTestRound(ctx, cs1, height, round) - if err := cs1.SetProposalAndBlock(prop, propBlock, thirdPropBlockParts, "some peer"); err != nil { - t.Fatal(err) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + firstBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - ensurePrevote(voteCh, height, round) - // we are no longer locked to the first block so we should be able to prevote - validatePrevote(t, cs1, round, vss[0], thirdPropBlockHash) + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // The proposed block should not have been locked. 
+ ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // we have a majority, now vs1 can change lock to the third block - validatePrecommit(t, cs1, round, round, vss[0], thirdPropBlockHash, thirdPropBlockHash) + incrementRound(vs2, vs3, vs4) + + // timeout to new round + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + + /* + Round 1: + No new proposal is created. + Send a prevote for B, the block from round 0, from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + cs1 saw a POL for the block it saw in round 0. We ensure that it does not + lock on this block, since it did not see a proposal for it in this round. + */ + t.Log("### Starting Round 1") + round++ + ensureNewRound(t, newRoundCh, height, round) + + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // cs1 prevotes nil since it did not see a proposal in this round. + + // All validators prevote for the old block. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) + + // Make sure that cs1 did not lock on the block since it did not receive a proposal for it.
+ ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) } // 4 vals // a polka at round 1 but we miss it // then a polka at round 2 that we lock on // then we see the polka from round 1 but shouldn't unlock -func TestStateLockPOLSafety1(t *testing.T) { +func TestStateLock_POLSafety1(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], 
propBlock.Hash()) - + ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash()) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: partSet.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(config, tmproto.PrevoteType, - propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), + prevotes := signVotes(ctx, t, tmproto.PrevoteType, config.ChainID(), + blockID, vs2, vs3, vs4) - t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) - // we do see them precommit nil - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // cs1 precommit nil - ensurePrecommit(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensurePrecommit(t, voteCh, height, round) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) t.Log("### ONTO ROUND 1") - - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) - incrementRound(vs2, vs3, vs4) - round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + r2BlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: propBlockParts.Header(), + } + + ensureNewRound(t, newRoundCh, height, round) //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
- if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer") + require.NoError(t, err) /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! */ - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = cs1.GetRoundState() - if rs.LockedBlock != nil { - panic("we should not be locked!") - } - t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) + require.Nil(t, rs.LockedBlock, "we should not be locked!") + + t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) // go to prevote, prevote for proposal block - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, r2BlockID.Hash) // now we see the others prevote for it, so we should lock on it - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r2BlockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], r2BlockID.Hash, r2BlockID.Hash) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) t.Log("### ONTO 
ROUND 2") /*Round3 @@ -1025,22 +1441,18 @@ func TestStateLockPOLSafety1(t *testing.T) { */ // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) // finish prevote - ensurePrevote(voteCh, height, round) - // we should prevote what we're locked on - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, nil) - newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + newStepCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRoundStep) // before prevotes from the previous round are added // add prevotes from the earlier round addVotes(cs1, prevotes...) - t.Log("Done adding prevotes!") - - ensureNoNewRoundStep(newStepCh) + ensureNoNewRoundStep(t, newStepCh) } // 4 vals. @@ -1050,190 +1462,333 @@ func TestStateLockPOLSafety1(t *testing.T) { // What we want: // dont see P0, lock on P1 at R1, dont unlock using P0 at R2 -func TestStateLockPOLSafety2(t *testing.T) { +func TestStateLock_POLSafety2(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := 
subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // the block for R0: gets polkad but we miss it // (even though we signed it, shhh) - _, propBlock0 := decideProposal(cs1, vss[0], height, round) + _, propBlock0 := decideProposal(ctx, t, cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() - propBlockParts0 := propBlock0.MakePartSet(partSize) + propBlockParts0, err := propBlock0.MakePartSet(partSize) + require.NoError(t, err) propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(config, tmproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) + prevotes := signVotes(ctx, t, tmproto.PrevoteType, config.ChainID(), propBlockID0, vs2, vs3, vs4) // the block for round 1 - prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash1 := propBlock1.Hash() - propBlockParts1 := propBlock1.MakePartSet(partSize) + prop1, propBlock1 := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) + propBlockParts1, err := propBlock1.MakePartSet(partSize) + require.NoError(t, err) + propBlockID1 := types.BlockID{Hash: propBlock1.Hash(), PartSetHeader: propBlockParts1.Header()} incrementRound(vs2, vs3, vs4) round++ // moving to the next round t.Log("### ONTO Round 1") // jump in at round 1 - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { - t.Fatal(err) - } - ensureNewProposal(proposalCh, height, round) + err = cs1.SetProposalAndBlock(ctx, prop1, propBlock1, propBlockParts1, "some peer") + require.NoError(t, err) + 
ensureNewProposal(t, proposalCh, height, round) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash1) + ensurePrevoteMatch(t, voteCh, height, round, propBlockID1.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), propBlockID1, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockID1.Hash, propBlockID1.Hash) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), propBlockID1, vs3) incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, round, 0, propBlockID0) + newProp := types.NewProposal(height, round, 0, propBlockID0, propBlock0.Header.Time) p := newProp.ToProto() - if err := vs3.SignProposal(context.Background(), config.ChainID(), p); err != nil { - t.Fatal(err) - } + err = vs3.SignProposal(ctx, config.ChainID(), p) + require.NoError(t, err) newProp.Signature = p.Signature - if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { - t.Fatal(err) - } + 
err = cs1.SetProposalAndBlock(ctx, newProp, propBlock0, propBlockParts0, "some peer") + require.NoError(t, err) // Add the pol votes addVotes(cs1, prevotes...) - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) t.Log("### ONTO Round 2") /*Round2 // now we see the polka from round 1, but we shouldnt unlock */ - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + +} + +// TestState_PrevotePOLFromPreviousRound tests that a validator will prevote +// for a block if it is locked on a different block but saw a POL for the block +// it is not locked on in a previous round. +func TestState_PrevotePOLFromPreviousRound(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + logger := log.NewNopLogger() + + cs1, vss := makeState(ctx, t, config, logger, 4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(context.Background()) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it. 
+ */ + t.Log("### Starting Round 0") + + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + r0BlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r0BlockID, vs2, vs3, vs4) + + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) + + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], r0BlockID.Hash, r0BlockID.Hash) + + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + + /* + Round 1: + Create a block, D but do not send a proposal for it to cs1. + Send a prevote for D from each of the validators to cs1 so that cs1 sees a POL. + Send a precommit for nil from all of the validtors to cs1. + + cs1 has now seen greater than 2/3 of the voting power prevote D in this round + but cs1 did not see the proposal for D in this round so it will not prevote or precommit it. + */ + t.Log("### Starting Round 1") + incrementRound(vs2, vs3, vs4) + round++ + // Generate a new proposal block. 
+ cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + cs2.ValidRound = 1 + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, round) + t.Log(propR1.POLRound) + propBlockR1Parts, err := propBlockR1.MakePartSet(partSize) + require.NoError(t, err) + r1BlockID := types.BlockID{ + Hash: propBlockR1.Hash(), + PartSetHeader: propBlockR1Parts.Header(), + } + require.NotEqual(t, r1BlockID.Hash, r0BlockID.Hash) + + ensureNewRound(t, newRoundCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) + + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + + /* + Create a new proposal for D, the same block from Round 1. + cs1 already saw greater than 2/3 of the voting power on the network vote for + D in a previous round, so it should prevote D once it receives a proposal for it. + + cs1 does not need to receive prevotes from other validators before the proposal + in this round. It will still prevote the block. + + Send cs1 prevotes for nil and check that it still prevotes its locked block + and not the block that it prevoted. + */ + t.Log("### Starting Round 2") + incrementRound(vs2, vs3, vs4) + round++ + propR2 := types.NewProposal(height, round, 1, r1BlockID, propBlockR1.Header.Time) + p := propR2.ToProto() + err = vs3.SignProposal(ctx, cs1.state.ChainID, p) + require.NoError(t, err) + propR2.Signature = p.Signature + + // cs1 receives a proposal for D, the block that received a POL in round 1. 
+ err = cs1.SetProposalAndBlock(ctx, propR2, propBlockR1, propBlockR1Parts, "") + require.NoError(t, err) - ensureNoNewUnlock(unlockCh) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash1) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + + // We should now prevote this block, despite being locked on the block from + // round 0. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], r1BlockID.Hash) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // cs1 did not receive a POL within this round, so it should remain locked + // on the block from round 0. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, r0BlockID.Hash) } // 4 vals. -// polka P0 at R0 for B0. We lock B0 on P0 at R0. P0 unlocks value at R1. +// polka P0 at R0 for B0. We lock B0 on P0 at R0. // What we want: // P0 proposes B0 at R3. 
func TestProposeValidBlock(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, cfg, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockHash := propBlock.Hash() + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + 
ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) // the others sign a polka - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, cfg.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // we should have precommitted - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + ensurePrecommit(t, voteCh, height, round) + // we should have precommitted the proposed block in this round. + + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, cfg.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - - t.Log("### ONTO ROUND 2") + ensureNewRound(t, newRoundCh, height, round) + t.Log("### ONTO ROUND 1") // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + // We did not see a valid proposal within this round, so prevote nil. 
+ ensurePrevoteMatch(t, voteCh, height, round, nil) - ensureNewUnlock(unlockCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, cfg.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // we should have precommitted - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensurePrecommit(t, voteCh, height, round) + // we should have precommitted nil during this round because we received + // >2/3 precommits for nil from the other validators. + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) incrementRound(vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, cfg.ChainID(), types.BlockID{}, vs2, vs3, vs4) - round += 2 // moving to the next round + round += 2 // increment by multiple rounds - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) t.Log("### ONTO ROUND 3") - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - t.Log("### ONTO ROUND 4") - - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), blockID.Hash)) assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) assert.True(t, rs.Proposal.POLRound == rs.ValidRound) assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.ValidBlock.Hash())) @@ -1243,46 +1798,52 @@ func TestProposeValidBlock(t *testing.T) { // P0 miss to lock B but set valid block to B after receiving delayed prevote. 
func TestSetValidBlockOnDelayedPrevote(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, 
round, blockID.Hash) // vs2 send prevote for propBlock - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) // vs3 send prevote nil - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs3) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) rs = cs1.GetRoundState() @@ -1291,14 +1852,14 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { assert.True(t, rs.ValidRound == -1) // vs2 send (delayed) prevote for propBlock - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs4) - ensureNewValidBlock(validBlockCh, height, round) + ensureNewValidBlock(t, validBlockCh, height, round) rs = cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) - assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), blockID.Hash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(blockID.PartSetHeader)) assert.True(t, rs.ValidRound == round) } @@ -1307,56 +1868,63 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { // receiving delayed Block Proposal. 
func TestSetValidBlockOnDelayedProposal(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribeToVoter(ctx, t, cs1, addr) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) round++ // move to round in which P0 is not proposer incrementRound(vs2, vs3, vs4) - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) - prop, propBlock 
:= decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // vs2, vs3 and vs4 send prevote for propBlock - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureNewValidBlock(validBlockCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + ensureNewValidBlock(t, validBlockCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + partSet, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, partSet, "some peer") + require.NoError(t, err) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) - assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), blockID.Hash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(blockID.PartSetHeader)) assert.True(t, rs.ValidRound == round) } @@ -1364,23 +1932,26 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { // What we want: // P0 waits for timeoutPrecommit 
before starting next round func TestWaitingTimeoutOnNilPolka(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) + logger := log.NewNopLogger() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) // start round - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - ensureNewRound(newRoundCh, height, round+1) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewRound(t, newRoundCh, height, round+1) } // 4 vals, 3 Prevotes for nil from the higher round. 
@@ -1388,37 +1959,39 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { // P0 waits for timeoutPropose in the next round before entering prevote func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensurePrevote(voteCh, height, round) + ensurePrevote(t, voteCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) } // 4 vals, 3 Precommits for nil from the higher round. @@ -1426,37 +1999,40 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { // P0 jump to higher round, precommit and start precommit wait func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, 
round) + ensureNewRound(t, newRoundCh, height, round) - ensurePrevote(voteCh, height, round) + ensurePrevote(t, voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) } // 4 vals, 3 Prevotes for nil in the current round. @@ -1464,37 +2040,42 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { // P0 wait for timeoutPropose to expire before sending prevote. 
func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round in which PO is not proposer - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) } // What we want: // P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) @@ -1502,25 +2083,29 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) - _, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + _, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // start round in which PO is not proposer - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, 
newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureNewValidBlock(validBlockCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) + ensureNewValidBlock(t, validBlockCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepCommit) assert.True(t, rs.ProposalBlock == nil) - assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) } @@ -1529,42 +2114,49 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { // After receiving block, it executes block and moves to the next height. func TestCommitFromPreviousRound(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: 
propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // start round in which PO is not proposer - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock for the previous round - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) - ensureNewValidBlock(validBlockCh, height, round) + ensureNewValidBlock(t, validBlockCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepCommit) assert.True(t, rs.CommitRound == vs2.Round) assert.True(t, rs.ProposalBlock == nil) - assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) - - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) + partSet, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, partSet, "some peer") + require.NoError(t, err) - ensureNewProposal(proposalCh, height, round) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewProposal(t, proposalCh, height, round) + ensureNewRound(t, newRoundCh, height+1, 0) } type fakeTxNotifier struct { @@ -1584,60 +2176,64 @@ func (n *fakeTxNotifier) Notify() { // start of the next round func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config.Consensus.SkipTimeoutCommit = false - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := 
cs1.Height, cs1.Round - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + precommitTimeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, 
voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // wait till timeout occurs - ensurePrecommitTimeout(precommitTimeoutCh) + ensureNewTimeout(t, precommitTimeoutCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) - ensureNewRound(newRoundCh, height, round+1) + ensureNewRound(t, newRoundCh, height, round+1) // majority is now reached - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) + ensureNewBlockHeader(t, newBlockHeader, height, blockID.Hash) cs1.txNotifier.(*fakeTxNotifier).Notify() - ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds()) rs = cs1.GetRoundState() assert.False( t, @@ -1647,55 +2243,59 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config.Consensus.SkipTimeoutCommit = false - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, config, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes 
- proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, 
theBlockParts, vs3) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) + ensureNewBlockHeader(t, newBlockHeader, height, blockID.Hash) - prop, propBlock := decideProposal(cs1, vs2, height+1, 0) - propBlockParts := propBlock.MakePartSet(partSize) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, height+1, 0) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - ensureNewProposal(proposalCh, height+1, 0) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer") + require.NoError(t, err) + ensureNewProposal(t, proposalCh, height+1, 0) rs = cs1.GetRoundState() assert.False( @@ -1704,83 +2304,6 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { "triggeredTimeoutPrecommit should be false at the beginning of each height") } -//------------------------------------------------------------------------------------------ -// SlashingSuite -// TODO: Slashing - -/* -func TestStateSlashingPrevotes(t *testing.T) { - cs1, vss := randState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - 
- // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, tmproto.PrevoteType, hash, rs.ProposalBlockParts.Header(), vs2) - - <-timeoutWaitCh - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) - - // add the conflicting vote - signAddVotes(cs1, tmproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} - -func TestStateSlashingPrecommits(t *testing.T) { - cs1, vss := randState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - // add prevote from vs2 - signAddVotes(cs1, tmproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - <-voteCh // precommit - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, tmproto.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) 
- - // add precommit from vs2 - signAddVotes(cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} -*/ - //------------------------------------------------------------------------------------------ // CatchupSuite @@ -1790,79 +2313,88 @@ func TestStateSlashingPrecommits(t *testing.T) { // 4 vals. // we receive a final precommit after going into next round, but others might have gone to commit already! func TestStateHalt1(t *testing.T) { - config := configSetup(t) + cfg := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss := randState(config, 4) + cs1, vss := makeState(ctx, t, cfg, logger, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - pv1, err := cs1.privValidator.GetPubKey(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) + pv1, err := cs1.privValidator.GetPubKey(ctx) require.NoError(t, err) addr := pv1.Address() - voteCh := subscribeToVoter(cs1, addr) + voteCh := subscribeToVoter(ctx, t, cs1, addr) // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + 
ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockParts := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } - ensurePrevote(voteCh, height, round) + ensurePrevote(t, voteCh, height, round) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, cfg.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, cfg.ChainID(), types.BlockID{}, vs2) // didnt receive proposal + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, cfg.ChainID(), blockID, vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! 
- precommit4 := signVote(vs4, config, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header()) + precommit4 := signVote(ctx, t, vs4, tmproto.PrecommitType, cfg.ChainID(), blockID) incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - rs = cs1.GetRoundState() + ensureNewRound(t, newRoundCh, height, round) t.Log("### ONTO ROUND 1") /*Round2 - // we timeout and prevote our lock + // we timeout and prevote // a polka happened but we didn't see it! */ - // go to prevote, prevote for locked block - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + // prevote for nil since we did not receive a proposal in this round. + ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash()) // now we receive the precommit from the previous round addVotes(cs1, precommit4) // receiving that precommit should take us straight to commit - ensureNewBlock(newBlockCh, height) + ensureNewBlock(t, newBlockCh, height) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) } func TestStateOutputsBlockPartsStats(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // create dummy peer - cs, _ := randState(config, 1) + cs, _ := makeState(ctx, t, config, logger, 1) peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") require.NoError(t, err) @@ -1875,26 +2407,26 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) - cs.handleMsg(msgInfo{msg, peerID}) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}) statsMessage := <-cs.statsMsgQueue 
require.Equal(t, msg, statsMessage.Msg, "") require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer - cs.handleMsg(msgInfo{msg, "peer2"}) + cs.handleMsg(ctx, msgInfo{msg, "peer2", tmtime.Now()}) // sending the part with the same height, but different round msg.Round = 1 - cs.handleMsg(msgInfo{msg, peerID}) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}) // sending the part from the smaller height msg.Height = 0 - cs.handleMsg(msgInfo{msg, peerID}) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}) // sending the part from the bigger height msg.Height = 3 - cs.handleMsg(msgInfo{msg, peerID}) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}) select { case <-cs.statsMsgQueue: @@ -1906,31 +2438,37 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { func TestStateOutputVoteStats(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss := randState(config, 2) + cs, vss := makeState(ctx, t, config, logger, 2) // create dummy peer peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) + blockID := types.BlockID{ + Hash: randBytes, + } - vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) + vote := signVote(ctx, t, vss[1], tmproto.PrecommitType, config.ChainID(), blockID) voteMessage := &VoteMessage{vote} - cs.handleMsg(msgInfo{voteMessage, peerID}) + cs.handleMsg(ctx, msgInfo{voteMessage, peerID, tmtime.Now()}) statsMessage := <-cs.statsMsgQueue require.Equal(t, voteMessage, statsMessage.Msg, "") require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer - cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}) + cs.handleMsg(ctx, msgInfo{&VoteMessage{vote}, "peer2", tmtime.Now()}) // sending the vote for the bigger height incrementHeight(vss[1]) - vote = 
signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) + vote = signVote(ctx, t, vss[1], tmproto.PrecommitType, config.ChainID(), blockID) - cs.handleMsg(msgInfo{&VoteMessage{vote}, peerID}) + cs.handleMsg(ctx, msgInfo{&VoteMessage{vote}, peerID, tmtime.Now()}) select { case <-cs.statsMsgQueue: @@ -1942,33 +2480,170 @@ func TestStateOutputVoteStats(t *testing.T) { func TestSignSameVoteTwice(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, vss := randState(config, 2) + _, vss := makeState(ctx, t, config, logger, 2) randBytes := tmrand.Bytes(tmhash.Size) - vote := signVote(vss[1], - config, + vote := signVote( + ctx, + t, + vss[1], tmproto.PrecommitType, - randBytes, - types.PartSetHeader{Total: 10, Hash: randBytes}, - ) + config.ChainID(), - vote2 := signVote(vss[1], - config, + types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randBytes}, + }, + ) + vote2 := signVote( + ctx, + t, + vss[1], tmproto.PrecommitType, - randBytes, - types.PartSetHeader{Total: 10, Hash: randBytes}, + config.ChainID(), + + types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randBytes}, + }, ) require.Equal(t, vote, vote2) } +// TestStateTimestamp_ProposalNotMatch tests that a validator does not prevote a +// proposed block if the timestamp in the block does not match the timestamp in the +// corresponding proposal message. 
+func TestStateTimestamp_ProposalNotMatch(t *testing.T) { + config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, config, logger, 4) + height, round := cs1.Height, cs1.Round + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + + propBlock, _, err := cs1.createProposalBlock() + require.NoError(t, err) + round++ + incrementRound(vss[1:]...) + + propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + + // Create a proposal with a timestamp that does not match the timestamp of the block. + proposal := types.NewProposal(vs2.Height, round, -1, blockID, propBlock.Header.Time.Add(time.Millisecond)) + p := proposal.ToProto() + err = vs2.SignProposal(ctx, config.ChainID(), p) + require.NoError(t, err) + proposal.Signature = p.Signature + require.NoError(t, cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer")) + + startTestRound(ctx, cs1, height, round) + ensureProposal(t, proposalCh, height, round, blockID) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // ensure that the validator prevotes nil. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) +} + +// TestStateTimestamp_ProposalMatch tests that a validator prevotes a +// proposed block if the timestamp in the block matches the timestamp in the +// corresponding proposal message. 
+func TestStateTimestamp_ProposalMatch(t *testing.T) { + config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, config, logger, 4) + height, round := cs1.Height, cs1.Round + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + pv1, err := cs1.privValidator.GetPubKey(ctx) + require.NoError(t, err) + addr := pv1.Address() + voteCh := subscribeToVoter(ctx, t, cs1, addr) + + propBlock, _, err := cs1.createProposalBlock() + require.NoError(t, err) + round++ + incrementRound(vss[1:]...) + + propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + + // Create a proposal with a timestamp that matches the timestamp of the block. + proposal := types.NewProposal(vs2.Height, round, -1, blockID, propBlock.Header.Time) + p := proposal.ToProto() + err = vs2.SignProposal(ctx, config.ChainID(), p) + require.NoError(t, err) + proposal.Signature = p.Signature + require.NoError(t, cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer")) + + startTestRound(ctx, cs1, height, round) + ensureProposal(t, proposalCh, height, round, blockID) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // ensure that the validator prevotes the block. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], propBlock.Hash()) + + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 1, vss[0], propBlock.Hash(), propBlock.Hash()) +} + // subscribe subscribes test client to the given query and returns a channel with cap = 1. 
-func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) - } - return sub.Out() +func subscribe( + ctx context.Context, + t *testing.T, + eventBus *eventbus.EventBus, + q *tmquery.Query, +) <-chan tmpubsub.Message { + t.Helper() + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: q, + }) + require.NoErrorf(t, err, "Failed to subscribe %q to %v: %v", testSubscriber, q, err) + ch := make(chan tmpubsub.Message) + go func() { + for { + next, err := sub.Next(ctx) + if err != nil { + if ctx.Err() != nil { + return + } + t.Errorf("Subscription for %v unexpectedly terminated: %v", q, err) + return + } + select { + case ch <- next: + case <-ctx.Done(): + return + } + } + }() + return ch } diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go index fb3571ac86..26570b7342 100644 --- a/internal/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "time" "github.com/tendermint/tendermint/libs/log" @@ -15,12 +16,11 @@ var ( // conditional on the height/round/step in the timeoutInfo. // The timeoutInfo.Duration may be non-positive. type TimeoutTicker interface { - Start() error + Start(context.Context) error Stop() error + IsRunning() bool Chan() <-chan timeoutInfo // on which to receive a timeout ScheduleTimeout(ti timeoutInfo) // reset the timer - - SetLogger(log.Logger) } // timeoutTicker wraps time.Timer, @@ -30,6 +30,7 @@ type TimeoutTicker interface { // and fired on the tockChan. type timeoutTicker struct { service.BaseService + logger log.Logger timer *time.Timer tickChan chan timeoutInfo // for scheduling timeouts @@ -37,30 +38,27 @@ type timeoutTicker struct { } // NewTimeoutTicker returns a new TimeoutTicker. 
-func NewTimeoutTicker() TimeoutTicker { +func NewTimeoutTicker(logger log.Logger) TimeoutTicker { tt := &timeoutTicker{ + logger: logger, timer: time.NewTimer(0), tickChan: make(chan timeoutInfo, tickTockBufferSize), tockChan: make(chan timeoutInfo, tickTockBufferSize), } - tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt) + tt.BaseService = *service.NewBaseService(logger, "TimeoutTicker", tt) tt.stopTimer() // don't want to fire until the first scheduled timeout return tt } // OnStart implements service.Service. It starts the timeout routine. -func (t *timeoutTicker) OnStart() error { - - go t.timeoutRoutine() +func (t *timeoutTicker) OnStart(ctx context.Context) error { + go t.timeoutRoutine(ctx) return nil } // OnStop implements service.Service. It stops the timeout routine. -func (t *timeoutTicker) OnStop() { - t.BaseService.OnStop() - t.stopTimer() -} +func (t *timeoutTicker) OnStop() { t.stopTimer() } // Chan returns a channel on which timeouts are sent. func (t *timeoutTicker) Chan() <-chan timeoutInfo { @@ -83,7 +81,6 @@ func (t *timeoutTicker) stopTimer() { select { case <-t.timer.C: default: - t.Logger.Debug("Timer already stopped") } } } @@ -91,13 +88,12 @@ func (t *timeoutTicker) stopTimer() { // send on tickChan to start a new timer. 
// timers are interupted and replaced by new ticks from later steps // timeouts of 0 on the tickChan will be immediately relayed to the tockChan -func (t *timeoutTicker) timeoutRoutine() { - t.Logger.Debug("Starting timeout routine") +func (t *timeoutTicker) timeoutRoutine(ctx context.Context) { var ti timeoutInfo for { select { case newti := <-t.tickChan: - t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti) + t.logger.Debug("Received tick", "old_ti", ti, "new_ti", newti) // ignore tickers for old height/round/step if newti.Height < ti.Height { @@ -119,15 +115,20 @@ func (t *timeoutTicker) timeoutRoutine() { // NOTE time.Timer allows duration to be non-positive ti = newti t.timer.Reset(ti.Duration) - t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + t.logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) case <-t.timer.C: - t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + t.logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // go routine here guarantees timeoutRoutine doesn't block. // Determinism comes from playback in the receiveRoutine. 
// We can eliminate it by merging the timeoutRoutine into receiveRoutine // and managing the timeouts ourselves with a millisecond ticker - go func(toi timeoutInfo) { t.tockChan <- toi }(ti) - case <-t.Quit(): + go func(toi timeoutInfo) { + select { + case t.tockChan <- toi: + case <-ctx.Done(): + } + }(ti) + case <-ctx.Done(): return } } diff --git a/internal/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go index 86b3e2c4f7..86c5d4c464 100644 --- a/internal/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -1,12 +1,12 @@ package types import ( + "encoding/json" "errors" "fmt" "strings" "sync" - tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -237,7 +237,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string { func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return tmjson.Marshal(hvs.toAllRoundVotes()) + return json.Marshal(hvs.toAllRoundVotes()) } func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes { diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index b3830a3f65..3ebfcf2ee2 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -2,10 +2,11 @@ package types import ( "context" - "fmt" + "log" "os" "testing" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" @@ -18,30 +19,37 @@ import ( var cfg *config.Config // NOTE: must be reset for each _test.go file func TestMain(m *testing.M) { - cfg = config.ResetTestRoot("consensus_height_vote_set_test") + var err error + cfg, err = 
config.ResetTestRoot("consensus_height_vote_set_test") + if err != nil { + log.Fatal(err) + } code := m.Run() os.RemoveAll(cfg.RootDir) os.Exit(code) } func TestPeerCatchupRounds(t *testing.T) { - valSet, privVals := factory.RandValidatorSet(10, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + valSet, privVals := factory.ValidatorSet(ctx, t, 10, 1) hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet) - vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) + vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals) + vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals) added, err = hvs.AddVote(vote1000_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals) + vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals) added, err = hvs.AddVote(vote1001_0, "peer1") if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -57,12 +65,18 @@ func TestPeerCatchupRounds(t *testing.T) { } -func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote { +func makeVoteHR( + ctx context.Context, + t *testing.T, + height int64, + valIndex, round int32, + privVals []types.PrivValidator, +) *types.Vote { + t.Helper() + privVal := privVals[valIndex] - pubKey, err := privVal.GetPubKey(context.Background()) - if err != nil { - panic(err) - } + pubKey, err := privVal.GetPubKey(ctx) + require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) @@ -78,10 +92,8 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty chainID := cfg.ChainID() v := vote.ToProto() - err = privVal.SignVote(context.Background(), chainID, v) - if err != nil { - 
panic(fmt.Sprintf("Error signing vote: %v", err)) - } + err = privVal.SignVote(ctx, chainID, v) + require.NoError(t, err, "Error signing vote") vote.Signature = v.Signature diff --git a/internal/consensus/types/peer_round_state.go b/internal/consensus/types/peer_round_state.go index 9d294d9afe..3f100414fb 100644 --- a/internal/consensus/types/peer_round_state.go +++ b/internal/consensus/types/peer_round_state.go @@ -13,9 +13,9 @@ import ( // PeerRoundState contains the known state of a peer. // NOTE: Read-only when returned by PeerState.GetRoundState(). type PeerRoundState struct { - Height int64 `json:"height"` // Height peer is at - Round int32 `json:"round"` // Round peer is at, -1 if unknown. - Step RoundStepType `json:"step"` // Step peer is at + Height int64 `json:"height,string"` // Height peer is at + Round int32 `json:"round"` // Round peer is at, -1 if unknown. + Step RoundStepType `json:"step"` // Step peer is at // Estimated start of round 0 at this height StartTime time.Time `json:"start_time"` diff --git a/internal/consensus/types/round_state.go b/internal/consensus/types/round_state.go index 9e67b76c07..566ca04d04 100644 --- a/internal/consensus/types/round_state.go +++ b/internal/consensus/types/round_state.go @@ -65,20 +65,21 @@ func (rs RoundStepType) String() string { // NOTE: Not thread safe. 
Should only be manipulated by functions downstream // of the cs.receiveRoutine type RoundState struct { - Height int64 `json:"height"` // Height we are working on + Height int64 `json:"height,string"` // Height we are working on Round int32 `json:"round"` Step RoundStepType `json:"step"` StartTime time.Time `json:"start_time"` // Subjective time when +2/3 precommits for Block at Round were found - CommitTime time.Time `json:"commit_time"` - Validators *types.ValidatorSet `json:"validators"` - Proposal *types.Proposal `json:"proposal"` - ProposalBlock *types.Block `json:"proposal_block"` - ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` - LockedRound int32 `json:"locked_round"` - LockedBlock *types.Block `json:"locked_block"` - LockedBlockParts *types.PartSet `json:"locked_block_parts"` + CommitTime time.Time `json:"commit_time"` + Validators *types.ValidatorSet `json:"validators"` + Proposal *types.Proposal `json:"proposal"` + ProposalReceiveTime time.Time `json:"proposal_receive_time"` + ProposalBlock *types.Block `json:"proposal_block"` + ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` + LockedRound int32 `json:"locked_round"` + LockedBlock *types.Block `json:"locked_block"` + LockedBlockParts *types.PartSet `json:"locked_block_parts"` // Last known round with POL for non-nil valid block. 
ValidRound int32 `json:"valid_round"` diff --git a/internal/consensus/wal.go b/internal/consensus/wal.go index 0d9efb839f..92d7a6b827 100644 --- a/internal/consensus/wal.go +++ b/internal/consensus/wal.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "encoding/binary" "errors" "fmt" @@ -11,8 +12,8 @@ import ( "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/internal/jsontypes" auto "github.com/tendermint/tendermint/internal/libs/autofile" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/libs/service" @@ -40,15 +41,17 @@ type TimedWALMessage struct { // EndHeightMessage marks the end of the given height inside WAL. // @internal used by scripts/wal2json util. type EndHeightMessage struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` } +func (EndHeightMessage) TypeTag() string { return "tendermint/wal/EndHeightMessage" } + type WALMessage interface{} func init() { - tmjson.RegisterType(msgInfo{}, "tendermint/wal/MsgInfo") - tmjson.RegisterType(timeoutInfo{}, "tendermint/wal/TimeoutInfo") - tmjson.RegisterType(EndHeightMessage{}, "tendermint/wal/EndHeightMessage") + jsontypes.MustRegister(msgInfo{}) + jsontypes.MustRegister(timeoutInfo{}) + jsontypes.MustRegister(EndHeightMessage{}) } //-------------------------------------------------------- @@ -63,7 +66,7 @@ type WAL interface { SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) // service methods - Start() error + Start(context.Context) error Stop() error Wait() } @@ -75,6 +78,7 @@ type WAL interface { // again. type BaseWAL struct { service.BaseService + logger log.Logger group *auto.Group @@ -88,22 +92,23 @@ var _ WAL = &BaseWAL{} // NewWAL returns a new write-ahead logger based on `baseWAL`, which implements // WAL. It's flushed and synced to disk every 2s and once when stopped. 
-func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { +func NewWAL(ctx context.Context, logger log.Logger, walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { err := tmos.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) } - group, err := auto.OpenGroup(walFile, groupOptions...) + group, err := auto.OpenGroup(ctx, logger, walFile, groupOptions...) if err != nil { return nil, err } wal := &BaseWAL{ + logger: logger, group: group, enc: NewWALEncoder(group), flushInterval: walDefaultFlushInterval, } - wal.BaseService = *service.NewBaseService(nil, "baseWAL", wal) + wal.BaseService = *service.NewBaseService(logger, "baseWAL", wal) return wal, nil } @@ -116,12 +121,7 @@ func (wal *BaseWAL) Group() *auto.Group { return wal.group } -func (wal *BaseWAL) SetLogger(l log.Logger) { - wal.BaseService.Logger = l - wal.group.SetLogger(l) -} - -func (wal *BaseWAL) OnStart() error { +func (wal *BaseWAL) OnStart(ctx context.Context) error { size, err := wal.group.Head.Size() if err != nil { return err @@ -130,23 +130,23 @@ func (wal *BaseWAL) OnStart() error { return err } } - err = wal.group.Start() + err = wal.group.Start(ctx) if err != nil { return err } wal.flushTicker = time.NewTicker(wal.flushInterval) - go wal.processFlushTicks() + go wal.processFlushTicks(ctx) return nil } -func (wal *BaseWAL) processFlushTicks() { +func (wal *BaseWAL) processFlushTicks(ctx context.Context) { for { select { case <-wal.flushTicker.C: if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error("Periodic WAL flush failed", "err", err) + wal.logger.Error("Periodic WAL flush failed", "err", err) } - case <-wal.Quit(): + case <-ctx.Done(): return } } @@ -164,10 +164,14 @@ func (wal *BaseWAL) FlushAndSync() error { func (wal *BaseWAL) OnStop() { wal.flushTicker.Stop() if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error("error on flush data to disk", 
"error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + wal.logger.Error("error on flush data to disk", "error", err) + } } if err := wal.group.Stop(); err != nil { - wal.Logger.Error("error trying to stop wal", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + wal.logger.Error("error trying to stop wal", "error", err) + } } wal.group.Close() } @@ -175,7 +179,12 @@ func (wal *BaseWAL) OnStop() { // Wait for the underlying autofile group to finish shutting down // so it's safe to cleanup files. func (wal *BaseWAL) Wait() { - wal.group.Wait() + if wal.IsRunning() { + wal.BaseService.Wait() + } + if wal.group.IsRunning() { + wal.group.Wait() + } } // Write is called in newStep and for each receive on the @@ -187,7 +196,7 @@ func (wal *BaseWAL) Write(msg WALMessage) error { } if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil { - wal.Logger.Error("Error writing msg to consensus wal. WARNING: recover may not be possible for the current height", + wal.logger.Error("error writing msg to consensus wal. WARNING: recover may not be possible for the current height", "err", err, "msg", msg) return err } @@ -208,7 +217,7 @@ func (wal *BaseWAL) WriteSync(msg WALMessage) error { } if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error(`WriteSync failed to flush consensus wal. + wal.logger.Error(`WriteSync failed to flush consensus wal. WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted`, "err", err) return err @@ -240,7 +249,7 @@ func (wal *BaseWAL) SearchForEndHeight( // NOTE: starting from the last file in the group because we're usually // searching for the last height. 
See replay.go min, max := wal.group.MinIndex(), wal.group.MaxIndex() - wal.Logger.Info("Searching for height", "height", height, "min", min, "max", max) + wal.logger.Info("Searching for height", "height", height, "min", min, "max", max) for index := max; index >= min; index-- { gr, err = wal.group.NewReader(index) if err != nil { @@ -260,7 +269,7 @@ func (wal *BaseWAL) SearchForEndHeight( break } if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) { - wal.Logger.Error("Corrupted entry. Skipping...", "err", err) + wal.logger.Error("Corrupted entry. Skipping...", "err", err) // do nothing continue } else if err != nil { @@ -271,7 +280,7 @@ func (wal *BaseWAL) SearchForEndHeight( if m, ok := msg.Msg.(EndHeightMessage); ok { lastHeightFound = m.Height if m.Height == height { // found - wal.Logger.Info("Found", "height", height, "index", index) + wal.logger.Info("Found", "height", height, "index", index) return gr, true, nil } } @@ -370,14 +379,14 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { return nil, err } if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)} + return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %w", err)} } crc := binary.BigEndian.Uint32(b) b = make([]byte, 4) _, err = dec.rd.Read(b) if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)} + return nil, DataCorruptionError{fmt.Errorf("failed to read length: %w", err)} } length := binary.BigEndian.Uint32(b) @@ -403,7 +412,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { var res = new(tmcons.TimedWALMessage) err = proto.Unmarshal(data, res) if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} + return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %w", err)} } walMsg, err := WALFromProto(res.Msg) @@ -428,6 +437,6 @@ func (nilWAL) FlushAndSync() error { return nil } func (nilWAL) 
SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } -func (nilWAL) Start() error { return nil } -func (nilWAL) Stop() error { return nil } -func (nilWAL) Wait() {} +func (nilWAL) Start(context.Context) error { return nil } +func (nilWAL) Stop() error { return nil } +func (nilWAL) Wait() {} diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index f81234f977..19d4472229 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -3,6 +3,7 @@ package consensus import ( "bufio" "bytes" + "context" "fmt" "io" mrand "math/rand" @@ -16,6 +17,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" @@ -29,13 +31,12 @@ import ( // persistent kvstore application and special consensus wal instance // (byteBufferWAL) and waits until numBlocks are created. // If the node fails to produce given numBlocks, it returns an error. 
-func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { +func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) (err error) { cfg := getConfig(t) - app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator")) + app := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), "wal_generator")) t.Cleanup(func() { require.NoError(t, app.Close()) }) - logger := log.TestingLogger().With("wal_generator", "wal_generator") logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks) // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS @@ -65,35 +66,23 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) - proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), proxy.NopMetrics()) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { + proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), logger.With("module", "proxy"), proxy.NopMetrics()) + if err := proxyApp.Start(ctx); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { + eventBus := eventbus.NewDefault(logger.With("module", "events")) + if err := eventBus.Start(ctx); err != nil { return fmt.Errorf("failed to start event bus: %w", err) } - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) - 
consensusState.SetLogger(logger) + consensusState := NewState(ctx, logger, cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) if privValidator != nil && privValidator != (*privval.FilePV)(nil) { - consensusState.SetPrivValidator(privValidator) + consensusState.SetPrivValidator(ctx, privValidator) } // END OF COPY PASTE @@ -107,7 +96,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { consensusState.wal = wal - if err := consensusState.Start(); err != nil { + if err := consensusState.Start(ctx); err != nil { return fmt.Errorf("failed to start consensus state: %w", err) } @@ -126,11 +115,11 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } // WALWithNBlocks returns a WAL content with numBlocks. -func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) { +func WALWithNBlocks(ctx context.Context, t *testing.T, logger log.Logger, numBlocks int) (data []byte, err error) { var b bytes.Buffer wr := bufio.NewWriter(&b) - if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil { + if err := WALGenerateNBlocks(ctx, t, logger, wr, numBlocks); err != nil { return []byte{}, err } @@ -156,7 +145,8 @@ func makeAddrs() (p2pAddr, rpcAddr string) { // getConfig returns a config for test cases func getConfig(t *testing.T) *config.Config { - c := config.ResetTestRoot(t.Name()) + c, err := config.ResetTestRoot(t.Name()) + require.NoError(t, err) p2pAddr, rpcAddr := makeAddrs() c.P2P.ListenAddress = p2pAddr @@ -228,6 +218,6 @@ func (w *byteBufferWAL) SearchForEndHeight( return nil, false, nil } -func (w *byteBufferWAL) Start() error { return nil } -func (w *byteBufferWAL) Stop() error { return nil } -func (w *byteBufferWAL) Wait() {} +func (w *byteBufferWAL) Start(context.Context) error { return nil } +func (w *byteBufferWAL) Stop() error { return nil } +func (w *byteBufferWAL) Wait() {} diff --git a/internal/consensus/wal_test.go 
b/internal/consensus/wal_test.go index 180af5f347..8a39b31b57 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -2,13 +2,14 @@ package consensus import ( "bytes" - "crypto/rand" + "context" + "errors" "path/filepath" - // "sync" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,45 +17,44 @@ import ( "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/libs/autofile" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" tmtypes "github.com/tendermint/tendermint/types" ) -const ( - walTestFlushInterval = time.Duration(100) * time.Millisecond -) +const walTestFlushInterval = 100 * time.Millisecond func TestWALTruncate(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // this magic number 4K can truncate the content when RotateFile. // defaultHeadSizeLimit(10M) is hard to simulate. // this magic number 1 * time.Millisecond make RotateFile check frequently. // defaultGroupCheckDuration(5s) is hard to simulate. - wal, err := NewWAL(walFile, + wal, err := NewWAL(ctx, logger, walFile, autofile.GroupHeadSizeLimit(4096), autofile.GroupCheckDuration(1*time.Millisecond), ) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(wal.Wait) // 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), // when headBuf is full, truncate content will Flush to the file. 
at this // time, RotateFile is called, truncate content exist in each file. - err = WALGenerateNBlocks(t, wal.Group(), 60) + err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60) require.NoError(t, err) + // put the leakcheck here so it runs after other cleanup + // functions. + t.Cleanup(leaktest.CheckTimeout(t, 500*time.Millisecond)) + time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run if err := wal.FlushAndSync(); err != nil { @@ -107,18 +107,14 @@ func TestWALWrite(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(walFile) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wal, err := NewWAL(ctx, log.TestingLogger(), walFile) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(wal.Wait) // 1) Write returns an error if msg is too big msg := &BlockPartMessage{ @@ -144,15 +140,19 @@ func TestWALWrite(t *testing.T) { } func TestWALSearchForEndHeight(t *testing.T) { - walBody, err := WALWithNBlocks(t, 6) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + walBody, err := WALWithNBlocks(ctx, t, logger, 6) if err != nil { t.Fatal(err) } - walFile := tempWALWithData(walBody) + walFile := tempWALWithData(t, walBody) - wal, err := NewWAL(walFile) + wal, err := NewWAL(ctx, logger, walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) @@ -170,26 +170,31 @@ func TestWALSearchForEndHeight(t *testing.T) { } func TestWALPeriodicSync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + walDir := t.TempDir() walFile := filepath.Join(walDir, 
"wal") - wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond)) require.NoError(t, err) wal.SetFlushInterval(walTestFlushInterval) - wal.SetLogger(log.TestingLogger()) + logger := log.NewNopLogger() // Generate some data - err = WALGenerateNBlocks(t, wal.Group(), 5) + err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5) require.NoError(t, err) // We should have data in the buffer now assert.NotZero(t, wal.Group().Buffered()) - require.NoError(t, wal.Start()) + require.NoError(t, wal.Start(ctx)) t.Cleanup(func() { if err := wal.Stop(); err != nil { - t.Error(err) + if !errors.Is(err, service.ErrAlreadyStopped) { + t.Error(err) + } } wal.Wait() }) @@ -208,69 +213,3 @@ func TestWALPeriodicSync(t *testing.T) { gr.Close() } } - -/* -var initOnce sync.Once - -func registerInterfacesOnce() { - initOnce.Do(func() { - var _ = wire.RegisterInterface( - struct{ WALMessage }{}, - wire.ConcreteType{[]byte{}, 0x10}, - ) - }) -} -*/ - -func nBytes(n int) []byte { - buf := make([]byte, n) - n, _ = rand.Read(buf) - return buf[:n] -} - -func benchmarkWalDecode(b *testing.B, n int) { - // registerInterfacesOnce() - buf := new(bytes.Buffer) - enc := NewWALEncoder(buf) - - data := nBytes(n) - if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil { - b.Error(err) - } - - encoded := buf.Bytes() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - buf.Reset() - buf.Write(encoded) - dec := NewWALDecoder(buf) - if _, err := dec.Decode(); err != nil { - b.Fatal(err) - } - } - b.ReportAllocs() -} - -func BenchmarkWalDecode512B(b *testing.B) { - benchmarkWalDecode(b, 512) -} - -func BenchmarkWalDecode10KB(b *testing.B) { - benchmarkWalDecode(b, 10*1024) -} -func BenchmarkWalDecode100KB(b *testing.B) { - benchmarkWalDecode(b, 100*1024) -} -func BenchmarkWalDecode1MB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024) -} 
-func BenchmarkWalDecode10MB(b *testing.B) { - benchmarkWalDecode(b, 10*1024*1024) -} -func BenchmarkWalDecode100MB(b *testing.B) { - benchmarkWalDecode(b, 100*1024*1024) -} -func BenchmarkWalDecode1GB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024*1024) -} diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go new file mode 100644 index 0000000000..3d5f7d82f8 --- /dev/null +++ b/internal/eventbus/event_bus.go @@ -0,0 +1,224 @@ +package eventbus + +import ( + "context" + "fmt" + "strings" + + abci "github.com/tendermint/tendermint/abci/types" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +// Subscription is a proxy interface for a pubsub Subscription. +type Subscription interface { + ID() string + Next(context.Context) (tmpubsub.Message, error) +} + +// EventBus is a common bus for all events going through the system. +// It is a type-aware wrapper around an underlying pubsub server. +// All events should be published via the bus. +type EventBus struct { + service.BaseService + pubsub *tmpubsub.Server +} + +// NewDefault returns a new event bus with default options. +func NewDefault(l log.Logger) *EventBus { + logger := l.With("module", "eventbus") + pubsub := tmpubsub.NewServer(l, tmpubsub.BufferCapacity(0)) + b := &EventBus{pubsub: pubsub} + b.BaseService = *service.NewBaseService(logger, "EventBus", b) + return b +} + +func (b *EventBus) OnStart(ctx context.Context) error { + return b.pubsub.Start(ctx) +} + +func (b *EventBus) OnStop() {} + +func (b *EventBus) NumClients() int { + return b.pubsub.NumClients() +} + +func (b *EventBus) NumClientSubscriptions(clientID string) int { + return b.pubsub.NumClientSubscriptions(clientID) +} + +// Deprecated: Use SubscribeWithArgs instead. 
+func (b *EventBus) Subscribe(ctx context.Context, + clientID string, query *tmquery.Query, capacities ...int) (Subscription, error) { + + return b.pubsub.Subscribe(ctx, clientID, query, capacities...) +} + +func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) { + return b.pubsub.SubscribeWithArgs(ctx, args) +} + +func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { + return b.pubsub.Unsubscribe(ctx, args) +} + +func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return b.pubsub.UnsubscribeAll(ctx, subscriber) +} + +func (b *EventBus) Observe(ctx context.Context, observe func(tmpubsub.Message) error, queries ...*tmquery.Query) error { + return b.pubsub.Observe(ctx, observe, queries...) +} + +func (b *EventBus) Publish(ctx context.Context, eventValue string, eventData types.EventData) error { + tokens := strings.Split(types.EventTypeKey, ".") + event := abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: eventValue, + }, + }, + } + + return b.pubsub.PublishWithEvents(ctx, eventData, []abci.Event{event}) +} + +func (b *EventBus) PublishEventNewBlock(ctx context.Context, data types.EventDataNewBlock) error { + events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + + // add Tendermint-reserved new block event + events = append(events, types.EventNewBlock) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewBlockHeader(ctx context.Context, data types.EventDataNewBlockHeader) error { + // no explicit deadline for publishing events + + events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) 
+ + // add Tendermint-reserved new block header event + events = append(events, types.EventNewBlockHeader) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewEvidence(ctx context.Context, evidence types.EventDataNewEvidence) error { + return b.Publish(ctx, types.EventNewEvidenceValue, evidence) +} + +func (b *EventBus) PublishEventVote(ctx context.Context, data types.EventDataVote) error { + return b.Publish(ctx, types.EventVoteValue, data) +} + +func (b *EventBus) PublishEventValidBlock(ctx context.Context, data types.EventDataRoundState) error { + return b.Publish(ctx, types.EventValidBlockValue, data) +} + +func (b *EventBus) PublishEventBlockSyncStatus(ctx context.Context, data types.EventDataBlockSyncStatus) error { + return b.Publish(ctx, types.EventBlockSyncStatusValue, data) +} + +func (b *EventBus) PublishEventStateSyncStatus(ctx context.Context, data types.EventDataStateSyncStatus) error { + return b.Publish(ctx, types.EventStateSyncStatusValue, data) +} + +// PublishEventTx publishes tx event with events from Result. Note it will add +// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys +// will be overwritten. 
+func (b *EventBus) PublishEventTx(ctx context.Context, data types.EventDataTx) error { + events := data.Result.Events + + // add Tendermint-reserved events + events = append(events, types.EventTx) + + tokens := strings.Split(types.TxHashKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%X", types.Tx(data.Tx).Hash()), + }, + }, + }) + + tokens = strings.Split(types.TxHeightKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%d", data.Height), + }, + }, + }) + + return b.pubsub.PublishWithEvents(ctx, data, events) +} + +func (b *EventBus) PublishEventNewRoundStep(ctx context.Context, data types.EventDataRoundState) error { + return b.Publish(ctx, types.EventNewRoundStepValue, data) +} + +func (b *EventBus) PublishEventTimeoutPropose(ctx context.Context, data types.EventDataRoundState) error { + return b.Publish(ctx, types.EventTimeoutProposeValue, data) +} + +func (b *EventBus) PublishEventTimeoutWait(ctx context.Context, data types.EventDataRoundState) error { + return b.Publish(ctx, types.EventTimeoutWaitValue, data) +} + +func (b *EventBus) PublishEventNewRound(ctx context.Context, data types.EventDataNewRound) error { + return b.Publish(ctx, types.EventNewRoundValue, data) +} + +func (b *EventBus) PublishEventCompleteProposal(ctx context.Context, data types.EventDataCompleteProposal) error { + return b.Publish(ctx, types.EventCompleteProposalValue, data) +} + +func (b *EventBus) PublishEventPolka(ctx context.Context, data types.EventDataRoundState) error { + return b.Publish(ctx, types.EventPolkaValue, data) +} + +func (b *EventBus) PublishEventRelock(ctx context.Context, data types.EventDataRoundState) error { + return b.Publish(ctx, types.EventRelockValue, data) +} + +func (b *EventBus) PublishEventLock(ctx context.Context, data types.EventDataRoundState) error { + 
return b.Publish(ctx, types.EventLockValue, data) +} + +func (b *EventBus) PublishEventValidatorSetUpdates(ctx context.Context, data types.EventDataValidatorSetUpdates) error { + return b.Publish(ctx, types.EventValidatorSetUpdatesValue, data) +} + +//----------------------------------------------------------------------------- + +// NopEventBus implements a types.BlockEventPublisher that discards all events. +type NopEventBus struct{} + +func (NopEventBus) PublishEventNewBlock(context.Context, types.EventDataNewBlock) error { + return nil +} + +func (NopEventBus) PublishEventNewBlockHeader(context.Context, types.EventDataNewBlockHeader) error { + return nil +} + +func (NopEventBus) PublishEventNewEvidence(context.Context, types.EventDataNewEvidence) error { + return nil +} + +func (NopEventBus) PublishEventTx(context.Context, types.EventDataTx) error { + return nil +} + +func (NopEventBus) PublishEventValidatorSetUpdates(context.Context, types.EventDataValidatorSetUpdates) error { + return nil +} diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go new file mode 100644 index 0000000000..94e9e701d5 --- /dev/null +++ b/internal/eventbus/event_bus_test.go @@ -0,0 +1,519 @@ +package eventbus_test + +import ( + "context" + "fmt" + mrand "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +func TestEventBusPublishEventTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + tx := types.Tx("foo") + 
result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + + // PublishEventTx adds 3 composite keys, so the query below should work + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) + txsSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := txsSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), edt.Height) + assert.Equal(t, uint32(0), edt.Index) + assert.EqualValues(t, tx, edt.Tx) + assert.Equal(t, result, edt.Result) + }() + + err = eventBus.PublishEventTx(ctx, types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a transaction after 1 sec.") + } +} + +func TestEventBusPublishEventNewBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + block := types.MakeBlock(0, []types.Tx{}, []types.Evidence{}, nil, nil, nil) + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, + }, + } + + // PublishEventNewBlock adds the tm.event 
compositeKey, so the query below should work + query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" + blocksSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := blocksSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlock) + assert.Equal(t, block, edt.Block) + assert.Equal(t, blockID, edt.BlockID) + assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) + assert.Equal(t, resultEndBlock, edt.ResultEndBlock) + }() + + err = eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{ + Block: block, + BlockID: blockID, + ResultBeginBlock: resultBeginBlock, + ResultEndBlock: resultEndBlock, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block after 1 sec.") + } +} + +func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + tx := types.Tx("foo") + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "recipient", Value: "bar"}, + {Key: "amount", Value: "5"}, + }, + }, + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "baz"}, + {Key: "recipient", Value: "cat"}, + {Key: "amount", Value: "13"}, + }, + }, + { + Type: "withdraw.rewards", + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "bar"}, + {Key: "source", Value: "iceman"}, + {Key: "amount", Value: "33"}, + }, + }, + }, + } + + testCases := []struct { + query string + expectResults bool + }{ + { + "tm.event='Tx' AND tx.height=1 AND 
transfer.sender='DoesNotExist'", + false, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", + false, + }, + } + + for i, tc := range testCases { + var name string + + if tc.expectResults { + name = fmt.Sprintf("ExpetedResultsCase%d", i) + } else { + name = fmt.Sprintf("NoResultsCase%d", i) + } + + t.Run(name, func(t *testing.T) { + + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: tmquery.MustCompile(tc.query), + }) + require.NoError(t, err) + + gotResult := make(chan bool, 1) + go func() { + defer close(gotResult) + tctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := sub.Next(tctx) + if err == nil { + data := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.EqualValues(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + gotResult <- true + } + }() + + assert.NoError(t, eventBus.PublishEventTx(ctx, types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + })) + + require.NoError(t, ctx.Err(), "context should not have been canceled") + + if got := <-gotResult; got != tc.expectResults { + require.Failf(t, "Wrong transaction result", + "got a tx: %v, wanted a tx: %v", got, tc.expectResults) + } + }) + + } +} + +func TestEventBusPublishEventNewBlockHeader(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + block := types.MakeBlock(0, []types.Tx{}, []types.Evidence{}, nil, nil, nil) 
+ resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, + }, + } + + // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work + query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" + headersSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := headersSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlockHeader) + assert.Equal(t, block.Header, edt.Header) + assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) + assert.Equal(t, resultEndBlock, edt.ResultEndBlock) + }() + + err = eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{ + Header: block.Header, + ResultBeginBlock: resultBeginBlock, + ResultEndBlock: resultEndBlock, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block header after 1 sec.") + } +} + +func TestEventBusPublishEventNewEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + ev, err := types.NewMockDuplicateVoteEvidence(ctx, 1, time.Now(), "test-chain-id") + require.NoError(t, err) + + const query = `tm.event='NewEvidence'` + evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := evSub.Next(ctx) 
+ assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewEvidence) + assert.Equal(t, ev, edt.Evidence) + assert.Equal(t, int64(4), edt.Height) + }() + + err = eventBus.PublishEventNewEvidence(ctx, types.EventDataNewEvidence{ + Evidence: ev, + Height: 4, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block header after 1 sec.") + } +} + +func TestEventBusPublish(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + const numEventsExpected = 14 + + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.All, + Limit: numEventsExpected, + }) + require.NoError(t, err) + + count := make(chan int, 1) + go func() { + defer close(count) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + for n := 0; ; n++ { + if _, err := sub.Next(ctx); err != nil { + count <- n + return + } + } + }() + + require.NoError(t, eventBus.Publish(ctx, types.EventNewBlockHeaderValue, + types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{})) + require.NoError(t, eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventVote(ctx, types.EventDataVote{})) + require.NoError(t, eventBus.PublishEventNewRoundStep(ctx, types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutPropose(ctx, types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutWait(ctx, types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventNewRound(ctx, types.EventDataNewRound{})) + require.NoError(t, eventBus.PublishEventCompleteProposal(ctx, types.EventDataCompleteProposal{})) + require.NoError(t, eventBus.PublishEventPolka(ctx, 
types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventRelock(ctx, types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventLock(ctx, types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventValidatorSetUpdates(ctx, types.EventDataValidatorSetUpdates{})) + require.NoError(t, eventBus.PublishEventBlockSyncStatus(ctx, types.EventDataBlockSyncStatus{})) + require.NoError(t, eventBus.PublishEventStateSyncStatus(ctx, types.EventDataStateSyncStatus{})) + + require.GreaterOrEqual(t, <-count, numEventsExpected) +} + +func BenchmarkEventBus(b *testing.B) { + benchmarks := []struct { + name string + numClients int + randQueries bool + randEvents bool + }{ + {"10Clients1Query1Event", 10, false, false}, + {"100Clients", 100, false, false}, + {"1000Clients", 1000, false, false}, + + {"10ClientsRandQueries1Event", 10, true, false}, + {"100Clients", 100, true, false}, + {"1000Clients", 1000, true, false}, + + {"10ClientsRandQueriesRandEvents", 10, true, true}, + {"100Clients", 100, true, true}, + {"1000Clients", 1000, true, true}, + + {"10Clients1QueryRandEvents", 10, false, true}, + {"100Clients", 100, false, true}, + {"1000Clients", 1000, false, true}, + } + + for _, bm := range benchmarks { + bm := bm + b.Run(bm.name, func(b *testing.B) { + benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) + }) + } +} + +func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { + // for random* functions + mrand.Seed(time.Now().Unix()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.TestingLogger()) // set buffer capacity to 0 so we are not testing cache + err := eventBus.Start(ctx) + if err != nil { + b.Error(err) + } + b.Cleanup(eventBus.Wait) + + q := types.EventQueryNewBlock + + for i := 0; i < numClients; i++ { + if randQueries { + q = randQuery() + } + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + 
ClientID: fmt.Sprintf("client-%d", i), + Query: q, + }) + if err != nil { + b.Fatal(err) + } + go func() { + for { + if _, err := sub.Next(ctx); err != nil { + return + } + } + }() + } + + eventValue := types.EventNewBlockValue + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if randEvents { + eventValue = randEventValue() + } + + err := eventBus.Publish(ctx, eventValue, types.EventDataString("Gamora")) + if err != nil { + b.Error(err) + } + } +} + +var events = []string{ + types.EventNewBlockValue, + types.EventNewBlockHeaderValue, + types.EventNewRoundValue, + types.EventNewRoundStepValue, + types.EventTimeoutProposeValue, + types.EventCompleteProposalValue, + types.EventPolkaValue, + types.EventLockValue, + types.EventRelockValue, + types.EventTimeoutWaitValue, + types.EventVoteValue, + types.EventBlockSyncStatusValue, + types.EventStateSyncStatusValue, +} + +func randEventValue() string { + return events[mrand.Intn(len(events))] +} + +var queries = []*tmquery.Query{ + types.EventQueryNewBlock, + types.EventQueryNewBlockHeader, + types.EventQueryNewRound, + types.EventQueryNewRoundStep, + types.EventQueryTimeoutPropose, + types.EventQueryCompleteProposal, + types.EventQueryPolka, + types.EventQueryLock, + types.EventQueryRelock, + types.EventQueryTimeoutWait, + types.EventQueryVote, + types.EventQueryBlockSyncStatus, + types.EventQueryStateSyncStatus, +} + +func randQuery() *tmquery.Query { + return queries[mrand.Intn(len(queries))] +} diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index f342dec4c9..a82c714fff 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -261,6 +261,10 @@ func (evpool *Pool) State() sm.State { return evpool.state } +func (evpool *Pool) Close() error { + return evpool.evidenceStore.Close() +} + // IsExpired checks whether evidence or a polc is expired by checking whether a height and time is older // than set by the evidence consensus parameters func (evpool *Pool) 
isExpired(height int64, time time.Time) bool { @@ -378,7 +382,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixToBytes(prefixKey)) if err != nil { - return nil, totalSize, fmt.Errorf("database error: %v", err) + return nil, totalSize, fmt.Errorf("database error: %w", err) } defer iter.Close() diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 69bb491e75..9d282ce196 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -38,8 +38,10 @@ func TestEvidencePoolBasic(t *testing.T) { blockStore = &mocks.BlockStore{} ) - valSet, privVals := factory.RandValidatorSet(1, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + valSet, privVals := factory.ValidatorSet(ctx, t, 1, 10) blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, ) @@ -54,8 +56,8 @@ func TestEvidencePoolBasic(t *testing.T) { require.Equal(t, 0, len(evs)) require.Zero(t, size) - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, privVals[0], evidenceChainID) - + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, defaultEvidenceTime, privVals[0], evidenceChainID) + require.NoError(t, err) // good evidence evAdded := make(chan struct{}) go func() { @@ -88,10 +90,13 @@ func TestEvidencePoolBasic(t *testing.T) { // Tests inbound evidence for the right time and height func TestAddExpiredEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var ( val = types.NewMockPV() height = int64(30) - stateStore = initializeValidatorState(t, val, height) + stateStore = initializeValidatorState(ctx, t, val, height) evidenceDB = dbm.NewMemDB() blockStore = &mocks.BlockStore{} expiredEvidenceTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) @@ -126,8 +131,12 @@ func 
TestAddExpiredEvidence(t *testing.T) { tc := tc t.Run(tc.evDescription, func(t *testing.T) { - ev := types.NewMockDuplicateVoteEvidenceWithValidator(tc.evHeight, tc.evTime, val, evidenceChainID) - err := pool.AddEvidence(ev) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, tc.evHeight, tc.evTime, val, evidenceChainID) + require.NoError(t, err) + err = pool.AddEvidence(ev) if tc.expErr { require.Error(t, err) } else { @@ -140,9 +149,14 @@ func TestAddExpiredEvidence(t *testing.T) { func TestReportConflictingVotes(t *testing.T) { var height int64 = 10 - pool, pv := defaultTestPool(t, height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, pv := defaultTestPool(ctx, t, height) val := types.NewValidator(pv.PrivKey.PubKey(), 10) - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height+1, defaultEvidenceTime, pv, evidenceChainID) + + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height+1, defaultEvidenceTime, pv, evidenceChainID) + require.NoError(t, err) pool.ReportConflictingVotes(ev.VoteA, ev.VoteB) @@ -174,33 +188,40 @@ func TestReportConflictingVotes(t *testing.T) { func TestEvidencePoolUpdate(t *testing.T) { height := int64(21) - pool, val := defaultTestPool(t, height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val := defaultTestPool(ctx, t, height) state := pool.State() // create two lots of old evidence that we expect to be pruned when we update - prunedEv := types.NewMockDuplicateVoteEvidenceWithValidator( + prunedEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 1, defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID, ) + require.NoError(t, err) - notPrunedEv := types.NewMockDuplicateVoteEvidenceWithValidator( + notPrunedEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 2, defaultEvidenceTime.Add(2*time.Minute), val, 
evidenceChainID, ) + require.NoError(t, err) require.NoError(t, pool.AddEvidence(prunedEv)) require.NoError(t, pool.AddEvidence(notPrunedEv)) - ev := types.NewMockDuplicateVoteEvidenceWithValidator( + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(21*time.Minute), val, evidenceChainID, ) + require.NoError(t, err) lastCommit := makeCommit(height, val.PrivKey.PubKey().Address()) block := types.MakeBlock(height+1, []types.Tx{}, []types.Evidence{ev}, nil, nil, lastCommit) @@ -227,7 +248,7 @@ func TestEvidencePoolUpdate(t *testing.T) { require.Equal(t, []types.Evidence{notPrunedEv}, evList) // b) If we try to check this evidence again it should fail because it has already been committed - err := pool.CheckEvidence(types.EvidenceList{ev}) + err = pool.CheckEvidence(types.EvidenceList{ev}) if assert.Error(t, err) { assert.Equal(t, "evidence was already committed", err.(*types.ErrInvalidEvidence).Reason.Error()) } @@ -236,29 +257,40 @@ func TestEvidencePoolUpdate(t *testing.T) { func TestVerifyPendingEvidencePasses(t *testing.T) { var height int64 = 1 - pool, val := defaultTestPool(t, height) - ev := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val := defaultTestPool(ctx, t, height) + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID, ) - + require.NoError(t, err) require.NoError(t, pool.AddEvidence(ev)) require.NoError(t, pool.CheckEvidence(types.EvidenceList{ev})) } func TestVerifyDuplicatedEvidenceFails(t *testing.T) { var height int64 = 1 - pool, val := defaultTestPool(t, height) - ev := types.NewMockDuplicateVoteEvidenceWithValidator( + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val := defaultTestPool(ctx, t, height) + + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, 
defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID, ) - err := pool.CheckEvidence(types.EvidenceList{ev, ev}) + require.NoError(t, err) + err = pool.CheckEvidence(types.EvidenceList{ev, ev}) if assert.Error(t, err) { assert.Equal(t, "duplicate evidence", err.(*types.ErrInvalidEvidence).Reason.Error()) } @@ -271,8 +303,10 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { height int64 = 100 commonHeight int64 = 90 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ev, trusted, common := makeLunaticEvidence(t, height, commonHeight, + ev, trusted, common := makeLunaticEvidence(ctx, t, height, commonHeight, 10, 5, 5, defaultEvidenceTime, defaultEvidenceTime.Add(1*time.Hour)) state := sm.State{ @@ -326,33 +360,41 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { // Tests that restarting the evidence pool after a potential failure will recover the // pending evidence and continue to gossip it func TestRecoverPendingEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height := int64(10) val := types.NewMockPV() valAddress := val.PrivKey.PubKey().Address() evidenceDB := dbm.NewMemDB() - stateStore := initializeValidatorState(t, val, height) + stateStore := initializeValidatorState(ctx, t, val, height) state, err := stateStore.Load() require.NoError(t, err) - blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress) + blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress) + require.NoError(t, err) // create previous pool and populate it pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) require.NoError(t, err) - goodEvidence := types.NewMockDuplicateVoteEvidenceWithValidator( + goodEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(10*time.Minute), val, evidenceChainID, ) - expiredEvidence := types.NewMockDuplicateVoteEvidenceWithValidator( + 
require.NoError(t, err) + expiredEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, int64(1), defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID, ) + require.NoError(t, err) require.NoError(t, pool.AddEvidence(goodEvidence)) require.NoError(t, pool.AddEvidence(expiredEvidence)) @@ -419,8 +461,8 @@ func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, h return stateStore } -func initializeValidatorState(t *testing.T, privVal types.PrivValidator, height int64) sm.Store { - pubKey, _ := privVal.GetPubKey(context.Background()) +func initializeValidatorState(ctx context.Context, t *testing.T, privVal types.PrivValidator, height int64) sm.Store { + pubKey, _ := privVal.GetPubKey(ctx) validator := &types.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey} // create validator set and state @@ -434,22 +476,29 @@ func initializeValidatorState(t *testing.T, privVal types.PrivValidator, height // initializeBlockStore creates a block storage and populates it w/ a dummy // block at +height+. 
-func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.BlockStore { +func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.BlockStore, error) { blockStore := store.NewBlockStore(db) for i := int64(1); i <= state.LastBlockHeight; i++ { lastCommit := makeCommit(i-1, valAddr) - block := sf.MakeBlock(state, i, lastCommit) + block, err := sf.MakeBlock(state, i, lastCommit) + if err != nil { + return nil, err + } + block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1} const parts = 1 - partSet := block.MakePartSet(parts) + partSet, err := block.MakePartSet(parts) + if err != nil { + return nil, err + } seenCommit := makeCommit(i, valAddr) blockStore.SaveBlock(block, partSet, seenCommit) } - return blockStore + return blockStore, nil } func makeCommit(height int64, valAddr []byte) *types.Commit { @@ -463,13 +512,16 @@ func makeCommit(height int64, valAddr []byte) *types.Commit { return types.NewCommit(height, 0, types.BlockID{}, commitSigs) } -func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, types.MockPV) { +func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, types.MockPV) { + t.Helper() val := types.NewMockPV() valAddress := val.PrivKey.PubKey().Address() evidenceDB := dbm.NewMemDB() - stateStore := initializeValidatorState(t, val, height) - state, _ := stateStore.Load() - blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress) + stateStore := initializeValidatorState(ctx, t, val, height) + state, err := stateStore.Load() + require.NoError(t, err) + blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, valAddress) + require.NoError(t, err) pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) require.NoError(t, err, "test evidence pool could not be created") diff --git a/internal/evidence/reactor.go 
b/internal/evidence/reactor.go index c2f25bd361..76086f17e6 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -1,6 +1,7 @@ package evidence import ( + "context" "fmt" "runtime/debug" "sync" @@ -34,7 +35,7 @@ const ( func GetChannelDescriptor() *p2p.ChannelDescriptor { return &p2p.ChannelDescriptor{ ID: EvidenceChannel, - MessageType: new(tmproto.EvidenceList), + MessageType: new(tmproto.Evidence), Priority: 6, RecvMessageCapacity: maxMsgSize, RecvBufferCapacity: 32, @@ -44,15 +45,15 @@ func GetChannelDescriptor() *p2p.ChannelDescriptor { // Reactor handles evpool evidence broadcasting amongst peers. type Reactor struct { service.BaseService + logger log.Logger evpool *Pool evidenceCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} peerWG sync.WaitGroup - mtx tmsync.Mutex + mtx sync.Mutex peerRoutines map[types.NodeID]*tmsync.Closer } @@ -60,30 +61,36 @@ type Reactor struct { // service.Service interface. It accepts a p2p Channel dedicated for handling // envelopes with EvidenceList messages. func NewReactor( + ctx context.Context, logger log.Logger, - evidenceCh *p2p.Channel, + chCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, evpool *Pool, -) *Reactor { +) (*Reactor, error) { + evidenceCh, err := chCreator(ctx, GetChannelDescriptor()) + if err != nil { + return nil, err + } + r := &Reactor{ + logger: logger, evpool: evpool, evidenceCh: evidenceCh, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), peerRoutines: make(map[types.NodeID]*tmsync.Closer), } r.BaseService = *service.NewBaseService(logger, "Evidence", r) - return r + return r, err } // OnStart starts separate go routines for each p2p Channel and listens for // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. No error is returned. 
-func (r *Reactor) OnStart() error { - go r.processEvidenceCh() - go r.processPeerUpdates() +func (r *Reactor) OnStart(ctx context.Context) error { + go r.processEvidenceCh(ctx) + go r.processPeerUpdates(ctx) return nil } @@ -101,44 +108,31 @@ func (r *Reactor) OnStop() { // exit. r.peerWG.Wait() - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.evidenceCh.Done() - <-r.peerUpdates.Done() + // Close the evidence db + r.evpool.Close() } // handleEvidenceMessage handles envelopes sent from peers on the EvidenceChannel. // It returns an error only if the Envelope.Message is unknown for this channel // or if the given evidence is invalid. This should never be called outside of // handleMessage. -func (r *Reactor) handleEvidenceMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleEvidenceMessage(envelope *p2p.Envelope) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *tmproto.EvidenceList: - // TODO: Refactor the Evidence type to not contain a list since we only ever - // send and receive one piece of evidence at a time. Or potentially consider - // batching evidence. - // - // see: https://github.com/tendermint/tendermint/issues/4729 - for i := 0; i < len(msg.Evidence); i++ { - ev, err := types.EvidenceFromProto(&msg.Evidence[i]) - if err != nil { - logger.Error("failed to convert evidence", "err", err) - continue - } - - if err := r.evpool.AddEvidence(ev); err != nil { - // If we're given invalid evidence by the peer, notify the router that - // we should remove this peer by returning an error. 
- if _, ok := err.(*types.ErrInvalidEvidence); ok { - return err - } + case *tmproto.Evidence: + // Process the evidence received from a peer + // Evidence is sent and received one by one + ev, err := types.EvidenceFromProto(msg) + if err != nil { + logger.Error("failed to convert evidence", "err", err) + return err + } + if err := r.evpool.AddEvidence(ev); err != nil { + // If we're given invalid evidence by the peer, notify the router that + // we should remove this peer by returning an error. + if _, ok := err.(*types.ErrInvalidEvidence); ok { + return err } } @@ -152,11 +146,11 @@ func (r *Reactor) handleEvidenceMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -164,7 +158,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) + r.logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) switch chID { case EvidenceChannel: @@ -179,23 +173,18 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // processEvidenceCh implements a blocking event loop where we listen for p2p // Envelope messages from the evidenceCh. 
-func (r *Reactor) processEvidenceCh() { - defer r.evidenceCh.Close() - - for { - select { - case envelope := <-r.evidenceCh.In: - if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.evidenceCh.ID, "envelope", envelope, "err", err) - r.evidenceCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processEvidenceCh(ctx context.Context) { + iter := r.evidenceCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.evidenceCh.ID, "envelope", envelope, "err", err) + if serr := r.evidenceCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on evidence channel; closing...") - return } } } @@ -211,8 +200,8 @@ func (r *Reactor) processEvidenceCh() { // connects/disconnects frequently from the broadcasting peer(s). // // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -221,8 +210,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: // Do not allow starting new evidence broadcast loops after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. + // peer broadcast loops, but the router still sends in-flight peer updates. 
if !r.IsRunning() { return } @@ -237,7 +225,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.peerRoutines[peerUpdate.NodeID] = closer r.peerWG.Add(1) - go r.broadcastEvidenceLoop(peerUpdate.NodeID, closer) + go r.broadcastEvidenceLoop(ctx, peerUpdate.NodeID, closer) } case p2p.PeerStatusDown: @@ -255,16 +243,12 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + r.processPeerUpdate(ctx, peerUpdate) + case <-ctx.Done(): return } } @@ -281,7 +265,7 @@ func (r *Reactor) processPeerUpdates() { // that the peer has already received or may not be ready for. // // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, closer *tmsync.Closer) { var next *clist.CElement defer func() { @@ -292,7 +276,7 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos r.peerWG.Done() if e := recover(); e != nil { - r.Logger.Error( + r.logger.Error( "recovering from broadcasting evidence loop", "err", e, "stack", string(debug.Stack()), @@ -311,15 +295,12 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos continue } + case <-ctx.Done(): + return case <-closer.Done(): // The peer is marked for removal via a PeerUpdate as the doneCh was // explicitly closed to signal we should exit. 
return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return } } @@ -333,13 +314,14 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos // and thus would not be able to process the evidence correctly. Also, the // peer may receive this piece of evidence multiple times if it added and // removed frequently from the broadcasting peer. - r.evidenceCh.Out <- p2p.Envelope{ - To: peerID, - Message: &tmproto.EvidenceList{ - Evidence: []tmproto.Evidence{*evProto}, - }, + + if err := r.evidenceCh.Send(ctx, p2p.Envelope{ + To: peerID, + Message: evProto, + }); err != nil { + return } - r.Logger.Debug("gossiped evidence to peer", "evidence", ev, "peer", peerID) + r.logger.Debug("gossiped evidence to peer", "evidence", ev, "peer", peerID) select { case <-time.After(time.Second * broadcastEvidenceIntervalS): @@ -354,9 +336,7 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos // explicitly closed to signal we should exit. return - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. 
+ case <-ctx.Done(): return } } diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index cf8f840ead..7ee0814268 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -1,6 +1,7 @@ package evidence_test import ( + "context" "encoding/hex" "math/rand" "sync" @@ -44,7 +45,7 @@ type reactorTestSuite struct { numStateStores int } -func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { +func setup(ctx context.Context, t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { t.Helper() pID := make([]byte, 16) @@ -55,15 +56,15 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { rts := &reactorTestSuite{ numStateStores: numStateStores, logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}), reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores), pools: make(map[types.NodeID]*evidence.Pool, numStateStores), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel, MessageType: new(tmproto.EvidenceList)} - rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) + chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel, MessageType: new(tmproto.Evidence)} + rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0) idx := 0 @@ -85,15 +86,22 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + 
rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID]) - rts.reactors[nodeID] = evidence.NewReactor(logger, - rts.evidenceChannels[nodeID], + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.evidenceChannels[nodeID], nil + } + + rts.reactors[nodeID], err = evidence.NewReactor( + ctx, + logger, + chCreator, rts.peerUpdates[nodeID], rts.pools[nodeID]) + require.NoError(t, err) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) idx++ @@ -107,14 +115,14 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { } } - leaktest.Check(t) }) + t.Cleanup(leaktest.Check(t)) return rts } -func (rts *reactorTestSuite) start(t *testing.T) { - rts.network.Start(t) +func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { + rts.network.Start(ctx, t) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), rts.numStateStores-1, @@ -190,22 +198,8 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence wg.Wait() } -func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) { - t.Helper() - - for id, r := range rts.reactors { - require.NoError(t, r.Stop(), "stopping reactor #%s", id) - r.Wait() - require.False(t, r.IsRunning(), "reactor #%d did not stop", id) - - } - - for id, ech := range rts.evidenceChannels { - require.Empty(t, ech.Out, "checking channel #%q", id) - } -} - func createEvidenceList( + ctx context.Context, t *testing.T, pool *evidence.Pool, val types.PrivValidator, @@ -216,13 +210,14 @@ func createEvidenceList( evList := make([]types.Evidence, numEvidence) for i := 0; i < numEvidence; i++ { - ev := types.NewMockDuplicateVoteEvidenceWithValidator( + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, int64(i+1), time.Date(2019, 1, 1, 
0, 0, 0, 0, time.UTC), val, evidenceChainID, ) - + require.NoError(t, err) require.NoError(t, pool.AddEvidence(ev), "adding evidence it#%d of %d to pool with height %d", i, numEvidence, pool.State().LastBlockHeight) @@ -233,31 +228,34 @@ func createEvidenceList( } func TestReactorMultiDisconnect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + val := types.NewMockPV() height := int64(numEvidence) + 10 - stateDB1 := initializeValidatorState(t, val, height) - stateDB2 := initializeValidatorState(t, val, height) + stateDB1 := initializeValidatorState(ctx, t, val, height) + stateDB2 := initializeValidatorState(ctx, t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 20) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 20) primary := rts.nodes[0] secondary := rts.nodes[1] - _ = createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + _ = createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) - rts.start(t) + rts.start(ctx, t) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusUp) // Ensure "disconnecting" the secondary peer from the primary more than once // is handled gracefully. 
- primary.PeerManager.Disconnected(secondary.NodeID) + primary.PeerManager.Disconnected(ctx, secondary.NodeID) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) _, err := primary.PeerManager.TryEvictNext() require.NoError(t, err) - primary.PeerManager.Disconnected(secondary.NodeID) + primary.PeerManager.Disconnected(ctx, secondary.NodeID) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) require.Equal(t, secondary.PeerManager.Status(primary.NodeID), p2p.PeerStatusUp) @@ -270,6 +268,9 @@ func TestReactorMultiDisconnect(t *testing.T) { func TestReactorBroadcastEvidence(t *testing.T) { numPeers := 7 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // create a stateDB for all test suites (nodes) stateDBs := make([]sm.Store, numPeers) val := types.NewMockPV() @@ -278,11 +279,11 @@ func TestReactorBroadcastEvidence(t *testing.T) { // evidence for. height := int64(numEvidence) + 10 for i := 0; i < numPeers; i++ { - stateDBs[i] = initializeValidatorState(t, val, height) + stateDBs[i] = initializeValidatorState(ctx, t, val, height) } - rts := setup(t, stateDBs, 0) - rts.start(t) + rts := setup(ctx, t, stateDBs, 0) + rts.start(ctx, t) // Create a series of fixtures where each suite contains a reactor and // evidence pool. In addition, we mark a primary suite and the rest are @@ -300,7 +301,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { secondaryIDs = append(secondaryIDs, id) } - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Add each secondary suite (node) as a peer to the primary suite (node). This // will cause the primary to gossip all evidence to the secondaries. 
@@ -318,8 +319,6 @@ func TestReactorBroadcastEvidence(t *testing.T) { for _, pool := range rts.pools { require.Equal(t, numEvidence, int(pool.Size())) } - - rts.assertEvidenceChannelsEmpty(t) } // TestReactorSelectiveBroadcast tests a context where we have two reactors @@ -330,20 +329,23 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { height1 := int64(numEvidence) + 10 height2 := int64(numEvidence) / 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-20) and // stateDB2 only has heights 1-5. - stateDB1 := initializeValidatorState(t, val, height1) - stateDB2 := initializeValidatorState(t, val, height2) + stateDB1 := initializeValidatorState(ctx, t, val, height1) + stateDB2 := initializeValidatorState(ctx, t, val, height2) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) - rts.start(t) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100) + rts.start(ctx, t) primary := rts.nodes[0] secondary := rts.nodes[1] // Send a list of valid evidence to the first reactor's, the one that is ahead, // evidence pool. - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Add each secondary suite (node) as a peer to the primary suite (node). This // will cause the primary to gossip all evidence to the secondaries. 
@@ -357,22 +359,23 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { require.Equal(t, numEvidence, int(rts.pools[primary.NodeID].Size())) require.Equal(t, int(height2), int(rts.pools[secondary.NodeID].Size())) - - rts.assertEvidenceChannelsEmpty(t) } func TestReactorBroadcastEvidence_Pending(t *testing.T) { val := types.NewMockPV() height := int64(10) - stateDB1 := initializeValidatorState(t, val, height) - stateDB2 := initializeValidatorState(t, val, height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stateDB1 := initializeValidatorState(ctx, t, val, height) + stateDB2 := initializeValidatorState(ctx, t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 100) primary := rts.nodes[0] secondary := rts.nodes[1] - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Manually add half the evidence to the secondary which will mark them as // pending. @@ -383,7 +386,7 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { // the secondary should have half the evidence as pending require.Equal(t, numEvidence/2, int(rts.pools[secondary.NodeID].Size())) - rts.start(t) + rts.start(ctx, t) // The secondary reactor should have received all the evidence ignoring the // already pending evidence. 
@@ -394,27 +397,27 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { require.Len(t, rts.pools, 2) assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), "primary node should have all the evidence") - if assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(), - "secondary nodes should have caught up") { - - rts.assertEvidenceChannelsEmpty(t) - } + assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") } func TestReactorBroadcastEvidence_Committed(t *testing.T) { val := types.NewMockPV() height := int64(10) - stateDB1 := initializeValidatorState(t, val, height) - stateDB2 := initializeValidatorState(t, val, height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stateDB1 := initializeValidatorState(ctx, t, val, height) + stateDB2 := initializeValidatorState(ctx, t, val, height) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 0) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}, 0) primary := rts.nodes[0] secondary := rts.nodes[1] // add all evidence to the primary reactor - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Manually add half the evidence to the secondary which will mark them as // pending. @@ -436,12 +439,7 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { require.Equal(t, 0, int(rts.pools[secondary.NodeID].Size())) // start the network and ensure it's configured - rts.start(t) - - // without the following sleep the test consistently fails; - // likely because the sleep forces a context switch that lets - // the router process other operations. - time.Sleep(2 * time.Millisecond) + rts.start(ctx, t) // The secondary reactor should have received all the evidence ignoring the // already committed evidence. 
@@ -450,11 +448,8 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { require.Len(t, rts.pools, 2) assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(), "primary node should have all the evidence") - if assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), - "secondary nodes should have caught up") { - - rts.assertEvidenceChannelsEmpty(t) - } + assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(), + "secondary nodes should have caught up") } func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { @@ -464,17 +459,20 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { stateDBs := make([]sm.Store, numPeers) val := types.NewMockPV() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // We need all validators saved for heights at least as high as we have // evidence for. height := int64(numEvidence) + 10 for i := 0; i < numPeers; i++ { - stateDBs[i] = initializeValidatorState(t, val, height) + stateDBs[i] = initializeValidatorState(ctx, t, val, height) } - rts := setup(t, stateDBs, 0) - rts.start(t) + rts := setup(ctx, t, stateDBs, 0) + rts.start(ctx, t) - evList := createEvidenceList(t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) // every suite (reactor) connects to every other suite (reactor) for outerID, outerChan := range rts.peerChans { @@ -501,7 +499,6 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { } } -// nolint:lll func TestEvidenceListSerialization(t *testing.T) { exampleVote := func(msgType byte) *types.Vote { var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z") diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index df1642f82d..bcc008aee6 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -33,9 +33,12 @@ func 
TestVerifyLightClientAttack_Lunatic(t *testing.T) { totalVals = 10 byzVals = 4 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + attackTime := defaultEvidenceTime.Add(1 * time.Hour) // create valid lunatic evidence - ev, trusted, common := makeLunaticEvidence( + ev, trusted, common := makeLunaticEvidence(ctx, t, height, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) require.NoError(t, ev.ValidateBasic()) @@ -57,7 +60,7 @@ func TestVerifyLightClientAttack_Lunatic(t *testing.T) { assert.Error(t, ev.ValidateABCI(common.ValidatorSet, trusted.SignedHeader, defaultEvidenceTime)) // evidence without enough malicious votes should fail - ev, trusted, common = makeLunaticEvidence( + ev, trusted, common = makeLunaticEvidence(ctx, t, height, commonHeight, totalVals, byzVals-1, totalVals-byzVals, defaultEvidenceTime, attackTime) err = evidence.VerifyLightClientAttack(ev, common.SignedHeader, trusted.SignedHeader, common.ValidatorSet, defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) @@ -71,9 +74,12 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { totalVals = 10 byzVals = 4 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + attackTime := defaultEvidenceTime.Add(1 * time.Hour) // create valid lunatic evidence - ev, trusted, common := makeLunaticEvidence( + ev, trusted, common := makeLunaticEvidence(ctx, t, height, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, attackTime) // now we try to test verification against state @@ -141,8 +147,10 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { ) attackTime := defaultEvidenceTime.Add(1 * time.Hour) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // create a forward lunatic attack - ev, trusted, common := makeLunaticEvidence( + ev, trusted, common := makeLunaticEvidence(ctx, t, attackHeight, commonHeight, totalVals, byzVals, totalVals-byzVals, defaultEvidenceTime, 
attackTime) // now we try to test verification against state @@ -190,17 +198,19 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { } func TestVerifyLightClientAttack_Equivocation(t *testing.T) { - conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - conflictingHeader, err := factory.MakeHeader(&types.Header{ + conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10) + + conflictingHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: 10, Time: defaultEvidenceTime, ValidatorsHash: conflictingVals.Hash(), }) - require.NoError(t, err) - trustedHeader, _ := factory.MakeHeader(&types.Header{ + trustedHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: 10, Time: defaultEvidenceTime, @@ -215,8 +225,9 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { // except the last validator vote twice blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) + commit, err := factory.MakeCommit(ctx, blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) require.NoError(t, err) + ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -233,30 +244,29 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) - trustedCommit, err := factory.MakeCommit(trustedBlockID, 10, 1, + trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + 
trustedSignedHeader := &types.SignedHeader{ Header: trustedHeader, Commit: trustedCommit, } // good pass -> no error - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.NoError(t, err) + require.NoError(t, evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.Error(t, err) + assert.Error(t, evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)) // conflicting header has different next validators hash which should have been correctly derived from // the previous round ev.ConflictingBlock.Header.NextValidatorsHash = crypto.CRandBytes(tmhash.Size) - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.Error(t, err) + assert.Error(t, evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)) + // revert next validators hash ev.ConflictingBlock.Header.NextValidatorsHash = trustedHeader.NextValidatorsHash @@ -284,18 +294,19 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { } func TestVerifyLightClientAttack_Amnesia(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var height int64 = 10 - conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10) + conflictingVals, conflictingPrivVals := factory.ValidatorSet(ctx, t, 5, 10) - conflictingHeader, err := 
factory.MakeHeader(&types.Header{ + conflictingHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: height, Time: defaultEvidenceTime, ValidatorsHash: conflictingVals.Hash(), }) - require.NoError(t, err) - trustedHeader, _ := factory.MakeHeader(&types.Header{ + trustedHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: height, Time: defaultEvidenceTime, @@ -310,8 +321,9 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { // except the last validator vote twice. However this time the commits are of different rounds. blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) + commit, err := factory.MakeCommit(ctx, blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -328,23 +340,22 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) - trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1, + trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + trustedSignedHeader := &types.SignedHeader{ Header: trustedHeader, Commit: trustedCommit, } // good pass -> no error - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.NoError(t, err) + require.NoError(t, evidence.VerifyLightClientAttack(ev, trustedSignedHeader, 
trustedSignedHeader, conflictingVals, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) - assert.Error(t, err) + assert.Error(t, evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)) state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), @@ -376,9 +387,12 @@ type voteData struct { } func TestVerifyDuplicateVoteEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + val := types.NewMockPV() val2 := types.NewMockPV() - valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(1)}) + valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(ctx, 1)}) blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) @@ -387,30 +401,30 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { const chainID = "mychain" - vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) + vote1 := makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) v1 := vote1.ToProto() - err := val.SignVote(context.Background(), chainID, v1) + err := val.SignVote(ctx, chainID, v1) require.NoError(t, err) - badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) + badVote := makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) bv := badVote.ToProto() - err = val2.SignVote(context.Background(), chainID, bv) + err = val2.SignVote(ctx, chainID, bv) require.NoError(t, err) vote1.Signature = v1.Signature badVote.Signature = bv.Signature cases := []voteData{ - {vote1, makeVote(t, 
val, chainID, 0, 10, 2, 1, blockID2, defaultEvidenceTime), true}, // different block ids - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID3, defaultEvidenceTime), true}, - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID4, defaultEvidenceTime), true}, - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime), false}, // wrong block id - {vote1, makeVote(t, val, "mychain2", 0, 10, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong chain id - {vote1, makeVote(t, val, chainID, 0, 11, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong height - {vote1, makeVote(t, val, chainID, 0, 10, 3, 1, blockID2, defaultEvidenceTime), false}, // wrong round - {vote1, makeVote(t, val, chainID, 0, 10, 2, 2, blockID2, defaultEvidenceTime), false}, // wrong step - {vote1, makeVote(t, val2, chainID, 0, 10, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong validator + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID2, defaultEvidenceTime), true}, // different block ids + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID3, defaultEvidenceTime), true}, + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID4, defaultEvidenceTime), true}, + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime), false}, // wrong block id + {vote1, makeVote(ctx, t, val, "mychain2", 0, 10, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong chain id + {vote1, makeVote(ctx, t, val, chainID, 0, 11, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong height + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 3, 1, blockID2, defaultEvidenceTime), false}, // wrong round + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 2, blockID2, defaultEvidenceTime), false}, // wrong step + {vote1, makeVote(ctx, t, val2, chainID, 0, 10, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong validator // a different vote time doesn't matter - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, time.Date(2020, 1, 1, 0, 0, 
0, 0, time.UTC)), true}, + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID2, time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)), true}, {vote1, badVote, false}, // signed by wrong key } @@ -431,11 +445,14 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { } // create good evidence and correct validator power - goodEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID) + goodEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 10, defaultEvidenceTime, val, chainID) + require.NoError(t, err) goodEv.ValidatorPower = 1 goodEv.TotalVotingPower = 1 - badEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID) - badTimeEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime.Add(1*time.Minute), val, chainID) + badEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 10, defaultEvidenceTime, val, chainID) + require.NoError(t, err) + badTimeEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 10, defaultEvidenceTime.Add(1*time.Minute), val, chainID) + require.NoError(t, err) badTimeEv.ValidatorPower = 1 badTimeEv.TotalVotingPower = 1 state := sm.State{ @@ -469,51 +486,53 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { } func makeLunaticEvidence( + ctx context.Context, t *testing.T, height, commonHeight int64, totalVals, byzVals, phantomVals int, commonTime, attackTime time.Time, ) (ev *types.LightClientAttackEvidence, trusted *types.LightBlock, common *types.LightBlock) { - commonValSet, commonPrivVals := factory.RandValidatorSet(totalVals, defaultVotingPower) + t.Helper() + + commonValSet, commonPrivVals := factory.ValidatorSet(ctx, t, totalVals, defaultVotingPower) require.Greater(t, totalVals, byzVals) // extract out the subset of byzantine validators in the common validator set byzValSet, byzPrivVals := commonValSet.Validators[:byzVals], commonPrivVals[:byzVals] - phantomValSet, phantomPrivVals := 
factory.RandValidatorSet(phantomVals, defaultVotingPower) + phantomValSet, phantomPrivVals := factory.ValidatorSet(ctx, t, phantomVals, defaultVotingPower) conflictingVals := phantomValSet.Copy() require.NoError(t, conflictingVals.UpdateWithChangeSet(byzValSet)) conflictingPrivVals := append(phantomPrivVals, byzPrivVals...) - conflictingPrivVals = orderPrivValsByValSet(t, conflictingVals, conflictingPrivVals) + conflictingPrivVals = orderPrivValsByValSet(ctx, t, conflictingVals, conflictingPrivVals) - commonHeader, err := factory.MakeHeader(&types.Header{ + commonHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: commonHeight, Time: commonTime, }) - require.NoError(t, err) - trustedHeader, err := factory.MakeHeader(&types.Header{ + + trustedHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: height, Time: defaultEvidenceTime, }) - require.NoError(t, err) - conflictingHeader, err := factory.MakeHeader(&types.Header{ + conflictingHeader := factory.MakeHeader(t, &types.Header{ ChainID: evidenceChainID, Height: height, Time: attackTime, ValidatorsHash: conflictingVals.Hash(), }) - require.NoError(t, err) blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) + commit, err := factory.MakeCommit(ctx, blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) + ev = &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -537,10 +556,11 @@ func makeLunaticEvidence( ValidatorSet: commonValSet, } trustedBlockID := factory.MakeBlockIDWithHash(trustedHeader.Hash()) - trustedVals, privVals := factory.RandValidatorSet(totalVals, defaultVotingPower) + trustedVals, privVals := factory.ValidatorSet(ctx, t, totalVals, 
defaultVotingPower) trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals) - trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime) + trustedCommit, err := factory.MakeCommit(ctx, trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime) require.NoError(t, err) + trusted = &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: trustedHeader, @@ -552,9 +572,11 @@ func makeLunaticEvidence( } func makeVote( + ctx context.Context, t *testing.T, val types.PrivValidator, chainID string, valIndex int32, height int64, - round int32, step int, blockID types.BlockID, time time.Time) *types.Vote { - pubKey, err := val.GetPubKey(context.Background()) + round int32, step int, blockID types.BlockID, time time.Time, +) *types.Vote { + pubKey, err := val.GetPubKey(ctx) require.NoError(t, err) v := &types.Vote{ ValidatorAddress: pubKey.Address(), @@ -567,10 +589,8 @@ func makeVote( } vpb := v.ToProto() - err = val.SignVote(context.Background(), chainID, vpb) - if err != nil { - panic(err) - } + err = val.SignVote(ctx, chainID, vpb) + require.NoError(t, err) v.Signature = vpb.Signature return v } @@ -591,12 +611,11 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc } } -func orderPrivValsByValSet( - t *testing.T, vals *types.ValidatorSet, privVals []types.PrivValidator) []types.PrivValidator { +func orderPrivValsByValSet(ctx context.Context, t *testing.T, vals *types.ValidatorSet, privVals []types.PrivValidator) []types.PrivValidator { output := make([]types.PrivValidator, len(privVals)) for idx, v := range vals.Validators { for _, p := range privVals { - pubKey, err := p.GetPubKey(context.Background()) + pubKey, err := p.GetPubKey(ctx) require.NoError(t, err) if bytes.Equal(v.Address, pubKey.Address()) { output[idx] = p diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 90e6153411..6381ea888a 
100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -5,8 +5,10 @@ import ( "errors" "fmt" "net" + "net/http" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/inspect/rpc" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" @@ -32,7 +34,7 @@ type Inspector struct { config *config.RPCConfig indexerService *indexer.Service - eventBus *types.EventBus + eventBus *eventbus.EventBus logger log.Logger } @@ -40,20 +42,19 @@ type Inspector struct { // The Inspector type does not modify the state or block stores. // The sinks are used to enable block and transaction querying via the RPC server. // The caller is responsible for starting and stopping the Inspector service. -/// -//nolint:lll func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector { - routes := rpc.Routes(*cfg, ss, bs, es, logger) - eb := types.NewEventBus() - eb.SetLogger(logger.With("module", "events")) - is := indexer.NewIndexerService(es, eb) - is.SetLogger(logger.With("module", "txindex")) + eb := eventbus.NewDefault(logger.With("module", "events")) + return &Inspector{ - routes: routes, - config: cfg, - logger: logger, - eventBus: eb, - indexerService: is, + routes: rpc.Routes(*cfg, ss, bs, es, logger), + config: cfg, + logger: logger, + eventBus: eb, + indexerService: indexer.NewService(indexer.ServiceArgs{ + Sinks: es, + EventBus: eb, + Logger: logger.With("module", "txindex"), + }), } } @@ -83,26 +84,18 @@ func NewFromConfig(logger log.Logger, cfg *config.Config) (*Inspector, error) { // Run starts the Inspector servers and blocks until the servers shut down. The passed // in context is used to control the lifecycle of the servers. 
func (ins *Inspector) Run(ctx context.Context) error { - err := ins.eventBus.Start() + err := ins.eventBus.Start(ctx) if err != nil { return fmt.Errorf("error starting event bus: %s", err) } - defer func() { - err := ins.eventBus.Stop() - if err != nil { - ins.logger.Error("event bus stopped with error", "err", err) - } - }() - err = ins.indexerService.Start() + defer ins.eventBus.Wait() + + err = ins.indexerService.Start(ctx) if err != nil { return fmt.Errorf("error starting indexer service: %s", err) } - defer func() { - err := ins.indexerService.Stop() - if err != nil { - ins.logger.Error("indexer service stopped with error", "err", err) - } - }() + defer ins.indexerService.Wait() + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) } @@ -125,7 +118,7 @@ func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logg logger.Info("RPC HTTPS server starting", "address", listenerAddr, "certfile", certFile, "keyfile", keyFile) err := server.ListenAndServeTLS(tctx, certFile, keyFile) - if !errors.Is(err, net.ErrClosed) { + if !errors.Is(err, net.ErrClosed) && !errors.Is(err, http.ErrServerClosed) { return err } logger.Info("RPC HTTPS server stopped", "address", listenerAddr) @@ -136,7 +129,7 @@ func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logg g.Go(func() error { logger.Info("RPC HTTP server starting", "address", listenerAddr) err := server.ListenAndServe(tctx) - if !errors.Is(err, net.ErrClosed) { + if !errors.Is(err, net.ErrClosed) && !errors.Is(err, http.ErrServerClosed) { return err } logger.Info("RPC HTTP server stopped", "address", listenerAddr) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 9729324405..c4ec3695ed 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "os" + "runtime" "strings" "sync" "testing" @@ -16,18 +17,19 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" 
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" ) func TestInspectConstructor(t *testing.T) { - cfg := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot("test") + require.NoError(t, err) testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() @@ -41,7 +43,9 @@ func TestInspectConstructor(t *testing.T) { } func TestInspectRun(t *testing.T) { - cfg := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot("test") + require.NoError(t, err) + testLogger := log.TestingLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() @@ -49,13 +53,14 @@ func TestInspectRun(t *testing.T) { logger := testLogger.With(t.Name()) d, err := inspect.NewFromConfig(logger, cfg) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) stoppedWG := &sync.WaitGroup{} stoppedWG.Add(1) go func() { + defer stoppedWG.Done() require.NoError(t, d.Run(ctx)) - stoppedWG.Done() }() + time.Sleep(100 * time.Millisecond) cancel() stoppedWG.Wait() }) @@ -76,6 +81,7 @@ func TestBlock(t *testing.T) { blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) rpcConfig := config.TestRPCConfig() l := 
log.TestingLogger() @@ -84,20 +90,17 @@ func TestBlock(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(1) - startedWG := &sync.WaitGroup{} - startedWG.Add(1) go func() { - startedWG.Done() defer wg.Done() require.NoError(t, d.Run(ctx)) }() // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch - startedWG.Wait() + runtime.Gosched() requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - resultBlock, err := cli.Block(context.Background(), &testHeight) + resultBlock, err := cli.Block(ctx, &testHeight) require.NoError(t, err) require.Equal(t, testBlock.Height, resultBlock.Block.Height) require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) @@ -111,7 +114,7 @@ func TestBlock(t *testing.T) { func TestTxSearch(t *testing.T) { testHash := []byte("test") testTx := []byte("tx") - testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash)) + testQuery := fmt.Sprintf("tx.hash = '%s'", string(testHash)) testTxResult := &abcitypes.TxResult{ Height: 1, Index: 100, @@ -149,7 +152,7 @@ func TestTxSearch(t *testing.T) { require.NoError(t, err) var page = 1 - resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + resultTxSearch, err := cli.TxSearch(ctx, testQuery, false, &page, &page, "") require.NoError(t, err) require.Len(t, resultTxSearch.Txs, 1) require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) @@ -195,7 +198,7 @@ func TestTx(t *testing.T) { cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Tx(context.Background(), testHash, false) + res, err := cli.Tx(ctx, testHash, false) require.NoError(t, err) require.Equal(t, types.Tx(testTx), res.Tx) @@ -220,6 +223,8 @@ func TestConsensusParams(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + 
eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -241,7 +246,7 @@ func TestConsensusParams(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - params, err := cli.ConsensusParams(context.Background(), &testHeight) + params, err := cli.ConsensusParams(ctx, &testHeight) require.NoError(t, err) require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) @@ -271,6 +276,8 @@ func TestBlockResults(t *testing.T) { blockStoreMock.On("Height").Return(testHeight) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -292,7 +299,7 @@ func TestBlockResults(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockResults(context.Background(), &testHeight) + res, err := cli.BlockResults(ctx, &testHeight) require.NoError(t, err) require.Equal(t, res.TotalGasUsed, testGasUsed) @@ -317,6 +324,8 @@ func TestCommit(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -338,7 +347,7 @@ func TestCommit(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Commit(context.Background(), 
&testHeight) + res, err := cli.Commit(ctx, &testHeight) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, res.SignedHeader.Commit.Round, testRound) @@ -369,6 +378,8 @@ func TestBlockByHash(t *testing.T) { blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -390,7 +401,7 @@ func TestBlockByHash(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockByHash(context.Background(), testHash) + res, err := cli.BlockByHash(ctx, testHash) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, []byte(res.BlockID.Hash), testHash) @@ -420,6 +431,8 @@ func TestBlockchain(t *testing.T) { }) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -441,7 +454,7 @@ func TestBlockchain(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockchainInfo(context.Background(), 0, 100) + res, err := cli.BlockchainInfo(ctx, 0, 100) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) @@ -471,6 +484,8 @@ func TestValidators(t *testing.T) { blockStoreMock.On("Base").Return(int64(0)) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) 
+ rpcConfig := config.TestRPCConfig() l := log.TestingLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) @@ -495,7 +510,7 @@ func TestValidators(t *testing.T) { testPage := 1 testPerPage := 100 - res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage) + res, err := cli.Validators(ctx, &testHeight, &testPage, &testPerPage) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testVotingPower, res.Validators[0].VotingPower) @@ -555,7 +570,7 @@ func TestBlockSearch(t *testing.T) { testPage := 1 testPerPage := 100 testOrderBy := "desc" - res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + res, err := cli.BlockSearch(ctx, testQuery, &testPage, &testPerPage, testOrderBy) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index 3043ba6b34..40f0d5d26f 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -8,14 +8,12 @@ import ( "github.com/rs/cors" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/rpc/jsonrpc/server" - "github.com/tendermint/tendermint/types" ) // Server defines parameters for running an Inspector rpc server. @@ -26,29 +24,30 @@ type Server struct { Config *config.RPCConfig } +type eventBusUnsubscriber interface { + UnsubscribeAll(ctx context.Context, subscriber string) error +} + // Routes returns the set of routes used by the Inspector server. 
-// -//nolint: lll func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, es []indexer.EventSink, logger log.Logger) core.RoutesMap { env := &core.Environment{ - Config: cfg, - EventSinks: es, - StateStore: s, - BlockStore: bs, - ConsensusReactor: waitSyncCheckerImpl{}, - Logger: logger, + Config: cfg, + EventSinks: es, + StateStore: s, + BlockStore: bs, + Logger: logger, } return core.RoutesMap{ - "blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true), - "consensus_params": server.NewRPCFunc(env.ConsensusParams, "height", true), - "block": server.NewRPCFunc(env.Block, "height", true), - "block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash", true), - "block_results": server.NewRPCFunc(env.BlockResults, "height", true), - "commit": server.NewRPCFunc(env.Commit, "height", true), - "validators": server.NewRPCFunc(env.Validators, "height,page,per_page", true), - "tx": server.NewRPCFunc(env.Tx, "hash,prove", true), - "tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), - "block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), + "blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight", "maxHeight"), + "consensus_params": server.NewRPCFunc(env.ConsensusParams, "height"), + "block": server.NewRPCFunc(env.Block, "height"), + "block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash"), + "block_results": server.NewRPCFunc(env.BlockResults, "height"), + "commit": server.NewRPCFunc(env.Commit, "height"), + "validators": server.NewRPCFunc(env.Validators, "height", "page", "per_page"), + "tx": server.NewRPCFunc(env.Tx, "hash", "prove"), + "tx_search": server.NewRPCFunc(env.TxSearch, "query", "prove", "page", "per_page", "order_by"), + "block_search": server.NewRPCFunc(env.BlockSearch, "query", "page", "per_page", "order_by"), } } @@ -59,7 +58,7 @@ func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logg mux := http.NewServeMux() 
wmLogger := logger.With("protocol", "websocket") - var eventBus types.EventBusSubscriber + var eventBus eventBusUnsubscriber websocketDisconnectFn := func(remoteAddr string) { err := eventBus.UnsubscribeAll(context.Background(), remoteAddr) @@ -67,10 +66,9 @@ func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logg wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) } } - wm := server.NewWebsocketManager(routes, + wm := server.NewWebsocketManager(logger, routes, server.OnDisconnect(websocketDisconnectFn), server.ReadLimit(rpcConfig.MaxBodyBytes)) - wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) server.RegisterRPCFuncs(mux, routes, logger) @@ -91,16 +89,6 @@ func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler { return h } -type waitSyncCheckerImpl struct{} - -func (waitSyncCheckerImpl) WaitSync() bool { - return false -} - -func (waitSyncCheckerImpl) GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) { - return nil, false -} - // ListenAndServe listens on the address specified in srv.Addr and handles any // incoming requests over HTTP using the Inspector rpc handler specified on the server. func (srv *Server) ListenAndServe(ctx context.Context) error { @@ -112,7 +100,8 @@ func (srv *Server) ListenAndServe(ctx context.Context) error { <-ctx.Done() listener.Close() }() - return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) + + return server.Serve(ctx, listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) } // ListenAndServeTLS listens on the address specified in srv.Addr. 
ListenAndServeTLS handles @@ -126,7 +115,7 @@ func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile stri <-ctx.Done() listener.Close() }() - return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) + return server.ServeTLS(ctx, listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) } func serverRPCConfig(r *config.RPCConfig) *server.Config { diff --git a/internal/jsontypes/jsontypes.go b/internal/jsontypes/jsontypes.go new file mode 100644 index 0000000000..69405da1b4 --- /dev/null +++ b/internal/jsontypes/jsontypes.go @@ -0,0 +1,121 @@ +// Package jsontypes supports decoding for interface types whose concrete +// implementations need to be stored as JSON. To do this, concrete values are +// packaged in wrapper objects having the form: +// +// { +// "type": "", +// "value": +// } +// +// This package provides a registry for type tag strings and functions to +// encode and decode wrapper objects. +package jsontypes + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +// The Tagged interface must be implemented by a type in order to register it +// with the jsontypes package. The TypeTag method returns a string label that +// is used to distinguish objects of that type. +type Tagged interface { + TypeTag() string +} + +// registry records the mapping from type tags to value types. +var registry = struct { + types map[string]reflect.Type +}{types: make(map[string]reflect.Type)} + +// register adds v to the type registry. It reports an error if the tag +// returned by v is already registered. +func register(v Tagged) error { + tag := v.TypeTag() + if t, ok := registry.types[tag]; ok { + return fmt.Errorf("type tag %q already registered to %v", tag, t) + } + registry.types[tag] = reflect.TypeOf(v) + return nil +} + +// MustRegister adds v to the type registry. It will panic if the tag returned +// by v is already registered. 
This function is meant for use during program +// initialization. +func MustRegister(v Tagged) { + if err := register(v); err != nil { + panic(err) + } +} + +type wrapper struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} + +// Marshal marshals a JSON wrapper object containing v. If v == nil, Marshal +// returns the JSON "null" value without error. +func Marshal(v Tagged) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + return json.Marshal(wrapper{ + Type: v.TypeTag(), + Value: data, + }) +} + +// Unmarshal unmarshals a JSON wrapper object into v. It reports an error if +// the data do not encode a valid wrapper object, if the wrapper's type tag is +// not registered with jsontypes, or if the resulting value is not compatible +// with the type of v. +func Unmarshal(data []byte, v interface{}) error { + // Verify that the target is some kind of pointer. + target := reflect.ValueOf(v) + if target.Kind() != reflect.Ptr { + return fmt.Errorf("target %T is not a pointer", v) + } else if target.IsZero() { + return fmt.Errorf("target is a nil %T", v) + } + baseType := target.Type().Elem() + if isNull(data) { + target.Elem().Set(reflect.Zero(baseType)) + return nil + } + + var w wrapper + dec := json.NewDecoder(bytes.NewReader(data)) + dec.DisallowUnknownFields() + if err := dec.Decode(&w); err != nil { + return fmt.Errorf("invalid type wrapper: %w", err) + } + typ, ok := registry.types[w.Type] + if !ok { + return fmt.Errorf("unknown type tag for %T: %q", v, w.Type) + } + if typ.AssignableTo(baseType) { + // ok: registered type is directly assignable to the target + } else if typ.Kind() == reflect.Ptr && typ.Elem().AssignableTo(baseType) { + typ = typ.Elem() + // ok: registered type is a pointer to a value assignable to the target + } else { + return fmt.Errorf("type %v is not assignable to %v", typ, baseType) + } + obj := reflect.New(typ) // we need a 
pointer to unmarshal + if err := json.Unmarshal(w.Value, obj.Interface()); err != nil { + return fmt.Errorf("decoding wrapped value: %w", err) + } + target.Elem().Set(obj.Elem()) + return nil +} + +// isNull reports true if data is empty or is the JSON "null" value. +func isNull(data []byte) bool { + return len(data) == 0 || bytes.Equal(data, []byte("null")) +} diff --git a/internal/jsontypes/jsontypes_test.go b/internal/jsontypes/jsontypes_test.go new file mode 100644 index 0000000000..223e25c343 --- /dev/null +++ b/internal/jsontypes/jsontypes_test.go @@ -0,0 +1,188 @@ +package jsontypes_test + +import ( + "testing" + + "github.com/tendermint/tendermint/internal/jsontypes" +) + +type testPtrType struct { + Field string `json:"field"` +} + +func (*testPtrType) TypeTag() string { return "test/PointerType" } +func (t *testPtrType) Value() string { return t.Field } + +type testBareType struct { + Field string `json:"field"` +} + +func (testBareType) TypeTag() string { return "test/BareType" } +func (t testBareType) Value() string { return t.Field } + +type fielder interface{ Value() string } + +func TestRoundTrip(t *testing.T) { + t.Run("MustRegister_ok", func(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Fatalf("Registration panicked: %v", x) + } + }() + jsontypes.MustRegister((*testPtrType)(nil)) + jsontypes.MustRegister(testBareType{}) + }) + + t.Run("MustRegister_fail", func(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Logf("Got expected panic: %v", x) + } + }() + jsontypes.MustRegister((*testPtrType)(nil)) + t.Fatal("Registration should not have succeeded") + }) + + t.Run("Marshal_nilTagged", func(t *testing.T) { + bits, err := jsontypes.Marshal(nil) + if err != nil { + t.Fatalf("Marshal failed: %v", err) + } + if got := string(bits); got != "null" { + t.Errorf("Marshal nil: got %#q, want null", got) + } + }) + + t.Run("RoundTrip_pointerType", func(t *testing.T) { + const wantEncoded = 
`{"type":"test/PointerType","value":{"field":"hello"}}` + + obj := testPtrType{Field: "hello"} + bits, err := jsontypes.Marshal(&obj) + if err != nil { + t.Fatalf("Marshal %T failed: %v", obj, err) + } + if got := string(bits); got != wantEncoded { + t.Errorf("Marshal %T: got %#q, want %#q", obj, got, wantEncoded) + } + + var cmp testPtrType + if err := jsontypes.Unmarshal(bits, &cmp); err != nil { + t.Errorf("Unmarshal %#q failed: %v", string(bits), err) + } + if obj != cmp { + t.Errorf("Unmarshal %#q: got %+v, want %+v", string(bits), cmp, obj) + } + }) + + t.Run("RoundTrip_bareType", func(t *testing.T) { + const wantEncoded = `{"type":"test/BareType","value":{"field":"hello"}}` + + obj := testBareType{Field: "hello"} + bits, err := jsontypes.Marshal(&obj) + if err != nil { + t.Fatalf("Marshal %T failed: %v", obj, err) + } + if got := string(bits); got != wantEncoded { + t.Errorf("Marshal %T: got %#q, want %#q", obj, got, wantEncoded) + } + + var cmp testBareType + if err := jsontypes.Unmarshal(bits, &cmp); err != nil { + t.Errorf("Unmarshal %#q failed: %v", string(bits), err) + } + if obj != cmp { + t.Errorf("Unmarshal %#q: got %+v, want %+v", string(bits), cmp, obj) + } + }) + + t.Run("Unmarshal_nilPointer", func(t *testing.T) { + var obj *testBareType + + // Unmarshaling to a nil pointer target should report an error. 
+ if err := jsontypes.Unmarshal([]byte(`null`), obj); err == nil { + t.Errorf("Unmarshal nil: got %+v, wanted error", obj) + } else { + t.Logf("Unmarshal correctly failed: %v", err) + } + }) + + t.Run("Unmarshal_bareType", func(t *testing.T) { + const want = "foobar" + const input = `{"type":"test/BareType","value":{"field":"` + want + `"}}` + + var obj testBareType + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if obj.Field != want { + t.Errorf("Unmarshal result: got %q, want %q", obj.Field, want) + } + }) + + t.Run("Unmarshal_bareType_interface", func(t *testing.T) { + const want = "foobar" + const input = `{"type":"test/BareType","value":{"field":"` + want + `"}}` + + var obj fielder + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if got := obj.Value(); got != want { + t.Errorf("Unmarshal result: got %q, want %q", got, want) + } + }) + + t.Run("Unmarshal_pointerType", func(t *testing.T) { + const want = "bazquux" + const input = `{"type":"test/PointerType","value":{"field":"` + want + `"}}` + + var obj testPtrType + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if obj.Field != want { + t.Errorf("Unmarshal result: got %q, want %q", obj.Field, want) + } + }) + + t.Run("Unmarshal_pointerType_interface", func(t *testing.T) { + const want = "foobar" + const input = `{"type":"test/PointerType","value":{"field":"` + want + `"}}` + + var obj fielder + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if got := obj.Value(); got != want { + t.Errorf("Unmarshal result: got %q, want %q", got, want) + } + }) + + t.Run("Unmarshal_unknownTypeTag", func(t *testing.T) { + const input = `{"type":"test/Nonesuch","value":null}` + + // An unregistered type tag in a valid envelope should report an error. 
+ var obj interface{} + if err := jsontypes.Unmarshal([]byte(input), &obj); err == nil { + t.Errorf("Unmarshal: got %+v, wanted error", obj) + } else { + t.Logf("Unmarshal correctly failed: %v", err) + } + }) + + t.Run("Unmarshal_similarTarget", func(t *testing.T) { + const want = "zootie-zoot-zoot" + const input = `{"type":"test/PointerType","value":{"field":"` + want + `"}}` + + // The target has a compatible (i.e., assignable) shape to the registered + // type. This should work even though it's not the original named type. + var cmp struct { + Field string `json:"field"` + } + if err := jsontypes.Unmarshal([]byte(input), &cmp); err != nil { + t.Errorf("Unmarshal %#q failed: %v", input, err) + } else if cmp.Field != want { + t.Errorf("Unmarshal result: got %q, want %q", cmp.Field, want) + } + }) +} diff --git a/libs/async/async.go b/internal/libs/async/async.go similarity index 100% rename from libs/async/async.go rename to internal/libs/async/async.go diff --git a/libs/async/async_test.go b/internal/libs/async/async_test.go similarity index 100% rename from libs/async/async_test.go rename to internal/libs/async/async_test.go diff --git a/internal/libs/autofile/autofile.go b/internal/libs/autofile/autofile.go index 10cc04a28f..6f38fc43bb 100644 --- a/internal/libs/autofile/autofile.go +++ b/internal/libs/autofile/autofile.go @@ -1,6 +1,9 @@ package autofile import ( + "context" + "errors" + "fmt" "os" "os/signal" "path/filepath" @@ -38,6 +41,10 @@ const ( autoFilePerms = os.FileMode(0600) ) +// errAutoFileClosed is reported when operations attempt to use an autofile +// after it has been closed. +var errAutoFileClosed = errors.New("autofile is closed") + // AutoFile automatically closes and re-opens file for writing. The file is // automatically setup to close itself every 1s and upon receiving SIGHUP. 
// @@ -46,81 +53,98 @@ type AutoFile struct { ID string Path string - closeTicker *time.Ticker - closeTickerStopc chan struct{} // closed when closeTicker is stopped - hupc chan os.Signal + closeTicker *time.Ticker // signals periodic close + cancel func() // cancels the lifecycle context - mtx sync.Mutex - file *os.File + mtx sync.Mutex // guards the fields below + closed bool // true when the the autofile is no longer usable + file *os.File // the underlying file (may be nil) } // OpenAutoFile creates an AutoFile in the path (with random ID). If there is // an error, it will be of type *PathError or *ErrPermissionsChanged (if file's // permissions got changed (should be 0600)). -func OpenAutoFile(path string) (*AutoFile, error) { +func OpenAutoFile(ctx context.Context, path string) (*AutoFile, error) { var err error path, err = filepath.Abs(path) if err != nil { return nil, err } + + ctx, cancel := context.WithCancel(ctx) af := &AutoFile{ - ID: tmrand.Str(12) + ":" + path, - Path: path, - closeTicker: time.NewTicker(autoFileClosePeriod), - closeTickerStopc: make(chan struct{}), + ID: tmrand.Str(12) + ":" + path, + Path: path, + closeTicker: time.NewTicker(autoFileClosePeriod), + cancel: cancel, } if err := af.openFile(); err != nil { af.Close() return nil, err } - // Close file on SIGHUP. - af.hupc = make(chan os.Signal, 1) - signal.Notify(af.hupc, syscall.SIGHUP) + // Set up a SIGHUP handler to forcibly flush and close the filehandle. + // This forces the next operation to re-open the underlying path. + hupc := make(chan os.Signal, 1) + signal.Notify(hupc, syscall.SIGHUP) go func() { - for range af.hupc { - _ = af.closeFile() + defer close(hupc) + for { + select { + case <-hupc: + _ = af.closeFile() + case <-ctx.Done(): + return + } } }() - go af.closeFileRoutine() + go af.closeFileRoutine(ctx) return af, nil } -// Close shuts down the closing goroutine, SIGHUP handler and closes the -// AutoFile. 
+// Close shuts down the service goroutine and marks af as invalid. Operations +// on af after Close will report an error. func (af *AutoFile) Close() error { - af.closeTicker.Stop() - close(af.closeTickerStopc) - if af.hupc != nil { - close(af.hupc) - } - return af.closeFile() + return af.withLock(func() error { + af.cancel() // signal the close service to stop + af.closed = true // mark the file as invalid + return af.unsyncCloseFile() + }) } -func (af *AutoFile) closeFileRoutine() { +func (af *AutoFile) closeFileRoutine(ctx context.Context) { for { select { + case <-ctx.Done(): + _ = af.Close() + return case <-af.closeTicker.C: _ = af.closeFile() - case <-af.closeTickerStopc: - return } } } func (af *AutoFile) closeFile() (err error) { - af.mtx.Lock() - defer af.mtx.Unlock() + return af.withLock(af.unsyncCloseFile) +} - file := af.file - if file == nil { - return nil +// unsyncCloseFile closes the underlying filehandle if one is open, and reports +// any error it returns. The caller must hold af.mtx exclusively. +func (af *AutoFile) unsyncCloseFile() error { + if fp := af.file; fp != nil { + af.file = nil + return fp.Close() } + return nil +} - af.file = nil - return file.Close() +// withLock runs f while holding af.mtx, and reports any error it returns. +func (af *AutoFile) withLock(f func() error) error { + af.mtx.Lock() + defer af.mtx.Unlock() + return f() } // Write writes len(b) bytes to the AutoFile. It returns the number of bytes @@ -130,6 +154,9 @@ func (af *AutoFile) closeFile() (err error) { func (af *AutoFile) Write(b []byte) (n int, err error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.closed { + return 0, fmt.Errorf("write: %w", errAutoFileClosed) + } if af.file == nil { if err = af.openFile(); err != nil { @@ -144,19 +171,19 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { // Sync commits the current contents of the file to stable storage. 
Typically, // this means flushing the file system's in-memory copy of recently written // data to disk. -// Opens AutoFile if needed. func (af *AutoFile) Sync() error { - af.mtx.Lock() - defer af.mtx.Unlock() - - if af.file == nil { - if err := af.openFile(); err != nil { - return err + return af.withLock(func() error { + if af.closed { + return fmt.Errorf("sync: %w", errAutoFileClosed) + } else if af.file == nil { + return nil // nothing to sync } - } - return af.file.Sync() + return af.file.Sync() + }) } +// openFile unconditionally replaces af.file with a new filehandle on the path. +// The caller must hold af.mtx exclusively. func (af *AutoFile) openFile() error { file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, autoFilePerms) if err != nil { @@ -179,6 +206,9 @@ func (af *AutoFile) openFile() error { func (af *AutoFile) Size() (int64, error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.closed { + return 0, fmt.Errorf("size: %w", errAutoFileClosed) + } if af.file == nil { if err := af.openFile(); err != nil { diff --git a/internal/libs/autofile/autofile_test.go b/internal/libs/autofile/autofile_test.go index c2442a56f9..dc5ba06825 100644 --- a/internal/libs/autofile/autofile_test.go +++ b/internal/libs/autofile/autofile_test.go @@ -1,7 +1,7 @@ package autofile import ( - "io/ioutil" + "context" "os" "path/filepath" "syscall" @@ -13,6 +13,9 @@ import ( ) func TestSIGHUP(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + origDir, err := os.Getwd() require.NoError(t, err) t.Cleanup(func() { @@ -22,7 +25,7 @@ func TestSIGHUP(t *testing.T) { }) // First, create a temporary directory and move into it - dir, err := ioutil.TempDir("", "sighup_test") + dir, err := os.MkdirTemp("", "sighup_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(dir) @@ -31,7 +34,7 @@ func TestSIGHUP(t *testing.T) { // Create an AutoFile in the temporary directory name := "sighup_test" - af, err := OpenAutoFile(name) 
+ af, err := OpenAutoFile(ctx, name) require.NoError(t, err) require.True(t, filepath.IsAbs(af.Path)) @@ -45,7 +48,7 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory - otherDir, err := ioutil.TempDir("", "sighup_test_other") + otherDir, err := os.MkdirTemp("", "sighup_test_other") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(otherDir) }) require.NoError(t, os.Chdir(otherDir)) @@ -72,7 +75,7 @@ func TestSIGHUP(t *testing.T) { } // The current directory should be empty - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) } @@ -80,7 +83,7 @@ func TestSIGHUP(t *testing.T) { // // Manually modify file permissions, close, and reopen using autofile: // // We expect the file permissions to be changed back to the intended perms. // func TestOpenAutoFilePerms(t *testing.T) { -// file, err := ioutil.TempFile("", "permission_test") +// file, err := os.CreateTemp("", "permission_test") // require.NoError(t, err) // err = file.Close() // require.NoError(t, err) @@ -105,13 +108,16 @@ func TestSIGHUP(t *testing.T) { // } func TestAutoFileSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // First, create an AutoFile writing to a tempfile dir - f, err := ioutil.TempFile("", "sighup_test") + f, err := os.CreateTemp("", "sighup_test") require.NoError(t, err) require.NoError(t, f.Close()) // Here is the actual AutoFile. - af, err := OpenAutoFile(f.Name()) + af, err := OpenAutoFile(ctx, f.Name()) require.NoError(t, err) // 1. Empty file @@ -128,7 +134,7 @@ func TestAutoFileSize(t *testing.T) { require.NoError(t, err) // 3. 
Not existing file - require.NoError(t, af.Close()) + require.NoError(t, af.closeFile()) require.NoError(t, os.Remove(f.Name())) size, err = af.Size() require.EqualValues(t, 0, size, "Expected a new file to be empty") @@ -139,7 +145,7 @@ func TestAutoFileSize(t *testing.T) { } func mustReadFile(t *testing.T, filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) require.NoError(t, err) return fileBytes diff --git a/internal/libs/autofile/cmd/logjack.go b/internal/libs/autofile/cmd/logjack.go index 1aa8b6a113..a9f6cf7667 100644 --- a/internal/libs/autofile/cmd/logjack.go +++ b/internal/libs/autofile/cmd/logjack.go @@ -1,15 +1,18 @@ package main import ( + "context" "flag" "fmt" "io" "os" + "os/signal" "strconv" "strings" + "syscall" auto "github.com/tendermint/tendermint/internal/libs/autofile" - tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/libs/log" ) const Version = "0.0.1" @@ -32,21 +35,10 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo return } -type fmtLogger struct{} - -func (fmtLogger) Info(msg string, keyvals ...interface{}) { - strs := make([]string, len(keyvals)) - for i, kv := range keyvals { - strs[i] = fmt.Sprintf("%v", kv) - } - fmt.Printf("%s %s\n", msg, strings.Join(strs, ",")) -} - func main() { - // Stop upon receiving SIGTERM or CTRL-C. 
- tmos.TrapSignal(fmtLogger{}, func() { - fmt.Println("logjack shutting down") - }) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM) + defer cancel() + defer func() { fmt.Println("logjack shutting down") }() // Read options headPath, chopSize, limitSize, version := parseFlags() @@ -56,13 +48,13 @@ func main() { } // Open Group - group, err := auto.OpenGroup(headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) + group, err := auto.OpenGroup(ctx, log.NewNopLogger(), headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) if err != nil { fmt.Printf("logjack couldn't create output file %v\n", headPath) os.Exit(1) } - if err = group.Start(); err != nil { + if err = group.Start(ctx); err != nil { fmt.Printf("logjack couldn't start with file %v\n", headPath) os.Exit(1) } @@ -72,14 +64,10 @@ func main() { for { n, err := os.Stdin.Read(buf) if err != nil { - if err := group.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "logjack stopped with error %v\n", headPath) - os.Exit(1) - } if err == io.EOF { os.Exit(0) } else { - fmt.Println("logjack errored") + fmt.Println("logjack errored:", err.Error()) os.Exit(1) } } diff --git a/internal/libs/autofile/group.go b/internal/libs/autofile/group.go index 23f27c59bd..1b4418d597 100644 --- a/internal/libs/autofile/group.go +++ b/internal/libs/autofile/group.go @@ -2,6 +2,7 @@ package autofile import ( "bufio" + "context" "errors" "fmt" "io" @@ -13,6 +14,7 @@ import ( "sync" "time" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -53,6 +55,7 @@ assuming that marker lines are written occasionally. */ type Group struct { service.BaseService + logger log.Logger ID string Head *AutoFile // The head AutoFile to write to @@ -77,17 +80,18 @@ type Group struct { // OpenGroup creates a new Group with head at headPath. It returns an error if // it fails to open head file. 
-func OpenGroup(headPath string, groupOptions ...func(*Group)) (*Group, error) { +func OpenGroup(ctx context.Context, logger log.Logger, headPath string, groupOptions ...func(*Group)) (*Group, error) { dir, err := filepath.Abs(filepath.Dir(headPath)) if err != nil { return nil, err } - head, err := OpenAutoFile(headPath) + head, err := OpenAutoFile(ctx, headPath) if err != nil { return nil, err } g := &Group{ + logger: logger, ID: "group:" + head.ID, Head: head, headBuf: bufio.NewWriterSize(head, 4096*10), @@ -104,7 +108,7 @@ func OpenGroup(headPath string, groupOptions ...func(*Group)) (*Group, error) { option(g) } - g.BaseService = *service.NewBaseService(nil, "Group", g) + g.BaseService = *service.NewBaseService(logger, "Group", g) gInfo := g.readGroupInfo() g.minIndex = gInfo.MinIndex @@ -135,9 +139,9 @@ func GroupTotalSizeLimit(limit int64) func(*Group) { // OnStart implements service.Service by starting the goroutine that checks file // and group limits. -func (g *Group) OnStart() error { +func (g *Group) OnStart(ctx context.Context) error { g.ticker = time.NewTicker(g.groupCheckDuration) - go g.processTicks() + go g.processTicks(ctx) return nil } @@ -146,7 +150,7 @@ func (g *Group) OnStart() error { func (g *Group) OnStop() { g.ticker.Stop() if err := g.FlushAndSync(); err != nil { - g.Logger.Error("Error flushing to disk", "err", err) + g.logger.Error("error flushing to disk", "err", err) } } @@ -160,11 +164,11 @@ func (g *Group) Wait() { // Close closes the head file. The group must be stopped by this moment. 
func (g *Group) Close() { if err := g.FlushAndSync(); err != nil { - g.Logger.Error("Error flushing to disk", "err", err) + g.logger.Error("error flushing to disk", "err", err) } g.mtx.Lock() - _ = g.Head.closeFile() + _ = g.Head.Close() g.mtx.Unlock() } @@ -236,38 +240,41 @@ func (g *Group) FlushAndSync() error { return err } -func (g *Group) processTicks() { +func (g *Group) processTicks(ctx context.Context) { defer close(g.doneProcessTicks) + for { select { - case <-g.ticker.C: - g.checkHeadSizeLimit() - g.checkTotalSizeLimit() - case <-g.Quit(): + case <-ctx.Done(): return + case <-g.ticker.C: + g.checkHeadSizeLimit(ctx) + g.checkTotalSizeLimit(ctx) } } } // NOTE: this function is called manually in tests. -func (g *Group) checkHeadSizeLimit() { +func (g *Group) checkHeadSizeLimit(ctx context.Context) { limit := g.HeadSizeLimit() if limit == 0 { return } size, err := g.Head.Size() if err != nil { - g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path, "err", err) + g.logger.Error("Group's head may grow without bound", "head", g.Head.Path, "err", err) return } if size >= limit { - g.RotateFile() + g.rotateFile(ctx) } } -func (g *Group) checkTotalSizeLimit() { - limit := g.TotalSizeLimit() - if limit == 0 { +func (g *Group) checkTotalSizeLimit(ctx context.Context) { + g.mtx.Lock() + defer g.mtx.Unlock() + + if g.totalSizeLimit == 0 { return } @@ -275,32 +282,35 @@ func (g *Group) checkTotalSizeLimit() { totalSize := gInfo.TotalSize for i := 0; i < maxFilesToRemove; i++ { index := gInfo.MinIndex + i - if totalSize < limit { + if totalSize < g.totalSizeLimit { return } if index == gInfo.MaxIndex { // Special degenerate case, just do nothing. 
- g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path) + g.logger.Error("Group's head may grow without bound", "head", g.Head.Path) return } pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) fInfo, err := os.Stat(pathToRemove) if err != nil { - g.Logger.Error("Failed to fetch info for file", "file", pathToRemove) + g.logger.Error("Failed to fetch info for file", "file", pathToRemove) continue } - err = os.Remove(pathToRemove) - if err != nil { - g.Logger.Error("Failed to remove path", "path", pathToRemove) + + if ctx.Err() != nil { + return + } + + if err = os.Remove(pathToRemove); err != nil { + g.logger.Error("Failed to remove path", "path", pathToRemove) return } totalSize -= fInfo.Size() } } -// RotateFile causes group to close the current head and assign it some index. -// Note it does not create a new head. -func (g *Group) RotateFile() { +// rotateFile causes group to close the current head and assign it some index. +func (g *Group) rotateFile(ctx context.Context) { g.mtx.Lock() defer g.mtx.Unlock() @@ -309,17 +319,25 @@ func (g *Group) RotateFile() { if err := g.headBuf.Flush(); err != nil { panic(err) } - if err := g.Head.Sync(); err != nil { panic(err) } + err := g.Head.withLock(func() error { + if err := ctx.Err(); err != nil { + return err + } - if err := g.Head.closeFile(); err != nil { - panic(err) - } + if err := g.Head.unsyncCloseFile(); err != nil { + return err + } - indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) - if err := os.Rename(headPath, indexPath); err != nil { + indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) + return os.Rename(headPath, indexPath) + }) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + if err != nil { panic(err) } diff --git a/internal/libs/autofile/group_test.go b/internal/libs/autofile/group_test.go index 0981923eb4..f6b3eaab6a 100644 --- a/internal/libs/autofile/group_test.go +++ 
b/internal/libs/autofile/group_test.go @@ -1,8 +1,8 @@ package autofile import ( + "context" "io" - "io/ioutil" "os" "path/filepath" "testing" @@ -10,18 +10,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" ) -func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group { +func createTestGroupWithHeadSizeLimit(ctx context.Context, t *testing.T, logger log.Logger, headSizeLimit int64) *Group { testID := tmrand.Str(12) testDir := "_test_" + testID err := tmos.EnsureDir(testDir, 0700) require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" - g, err := OpenGroup(headPath, GroupHeadSizeLimit(headSizeLimit)) + g, err := OpenGroup(ctx, logger, headPath, GroupHeadSizeLimit(headSizeLimit)) require.NoError(t, err, "Error opening Group") require.NotEqual(t, nil, g, "Failed to create Group") @@ -43,7 +44,12 @@ func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, tota } func TestCheckHeadSizeLimit(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 1000*1000) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 1000*1000) // At first, there are no files. assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) @@ -58,7 +64,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Even calling checkHeadSizeLimit manually won't rotate it. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. @@ -68,7 +74,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { require.NoError(t, err) // Calling checkHeadSizeLimit this time rolls it. 
- g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) // Write 1000 more bytes. @@ -78,7 +84,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { require.NoError(t, err) // Calling checkHeadSizeLimit does nothing. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) // Write 1000 bytes 999 times. @@ -91,7 +97,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) // Calling checkHeadSizeLimit rolls it again. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) // Write 1000 more bytes. @@ -102,7 +108,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Calling checkHeadSizeLimit does nothing. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Cleanup @@ -110,7 +116,11 @@ func TestCheckHeadSizeLimit(t *testing.T) { } func TestRotateFile(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) // Create a different temporary directory and move into it, to make sure // relative paths are resolved at Group creation @@ -122,7 +132,7 @@ func TestRotateFile(t *testing.T) { } }() - dir, err := ioutil.TempDir("", "rotate_test") + dir, err := os.MkdirTemp("", "rotate_test") require.NoError(t, err) defer os.RemoveAll(dir) err = os.Chdir(dir) @@ -140,7 +150,7 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) err = g.WriteLine("Line 4") require.NoError(t, err) err = g.WriteLine("Line 5") @@ -151,21 +161,21 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) // Read 
g.Head.Path+"000" - body1, err := ioutil.ReadFile(g.Head.Path + ".000") + body1, err := os.ReadFile(g.Head.Path + ".000") assert.NoError(t, err, "Failed to read first rolled file") if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path - body2, err := ioutil.ReadFile(g.Head.Path) + body2, err := os.ReadFile(g.Head.Path) assert.NoError(t, err, "Failed to read first rolled file") if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("got unexpected contents: [%v]", string(body2)) } // Make sure there are no files in the current, temporary directory - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) @@ -174,7 +184,12 @@ func TestRotateFile(t *testing.T) { } func TestWrite(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) written := []byte("Medusa") _, err := g.Write(written) @@ -197,14 +212,19 @@ func TestWrite(t *testing.T) { // test that Read reads the required amount of bytes from all the files in the // group and returns no error if n == size of the given slice. func TestGroupReaderRead(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) professor := []byte("Professor Monster") _, err := g.Write(professor) require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) frankenstein := []byte("Frankenstein's Monster") _, err = g.Write(frankenstein) require.NoError(t, err) @@ -230,14 +250,19 @@ func TestGroupReaderRead(t *testing.T) { // test that Read returns an error if number of bytes read < size of // the given slice. 
Subsequent call should return 0, io.EOF. func TestGroupReaderRead2(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.TestingLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) professor := []byte("Professor Monster") _, err := g.Write(professor) require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) frankenstein := []byte("Frankenstein's Monster") frankensteinPart := []byte("Frankenstein") _, err = g.Write(frankensteinPart) // note writing only a part @@ -265,7 +290,11 @@ func TestGroupReaderRead2(t *testing.T) { } func TestMinIndex(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning") @@ -274,7 +303,11 @@ func TestMinIndex(t *testing.T) { } func TestMaxIndex(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning") @@ -282,7 +315,7 @@ func TestMaxIndex(t *testing.T) { require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file") diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go index 6cf5157060..145c4e4f10 100644 --- a/internal/libs/clist/clist.go +++ b/internal/libs/clist/clist.go @@ -14,8 +14,6 @@ to ensure garbage collection of removed elements. 
import ( "fmt" "sync" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) // MaxLength is the max allowed number of elements a linked list is @@ -44,7 +42,7 @@ waiting on NextWait() (since it's just a read operation). */ type CElement struct { - mtx tmsync.RWMutex + mtx sync.RWMutex prev *CElement prevWg *sync.WaitGroup prevWaitCh chan struct{} @@ -220,7 +218,7 @@ func (e *CElement) SetRemoved() { // Operations are goroutine-safe. // Panics if length grows beyond the max. type CList struct { - mtx tmsync.RWMutex + mtx sync.RWMutex wg *sync.WaitGroup waitCh chan struct{} head *CElement // first element diff --git a/internal/libs/clist/clist_test.go b/internal/libs/clist/clist_test.go index 0c0a7a86e9..a0482fc402 100644 --- a/internal/libs/clist/clist_test.go +++ b/internal/libs/clist/clist_test.go @@ -67,7 +67,7 @@ func TestSmall(t *testing.T) { func TestGCFifo(t *testing.T) { - const numElements = 1000000 + const numElements = 10000 l := New() gcCount := 0 @@ -100,7 +100,7 @@ func TestGCFifo(t *testing.T) { tickerDoneCh := make(chan struct{}) go func() { defer close(tickerDoneCh) - ticker := time.NewTicker(time.Second) + ticker := time.NewTicker(250 * time.Millisecond) for { select { case <-ticker.C: @@ -127,7 +127,7 @@ func TestGCFifo(t *testing.T) { func TestGCRandom(t *testing.T) { - const numElements = 1000000 + const numElements = 10000 l := New() gcCount := 0 @@ -163,7 +163,7 @@ func TestGCRandom(t *testing.T) { tickerDoneCh := make(chan struct{}) go func() { defer close(tickerDoneCh) - ticker := time.NewTicker(time.Second) + ticker := time.NewTicker(250 * time.Millisecond) for { select { case <-ticker.C: @@ -282,14 +282,14 @@ func TestWaitChan(t *testing.T) { done := make(chan struct{}) pushed := 0 go func() { + defer close(done) for i := 1; i < 100; i++ { l.PushBack(i) pushed++ - time.Sleep(time.Duration(mrand.Intn(25)) * time.Millisecond) + time.Sleep(time.Duration(mrand.Intn(20)) * time.Millisecond) } // apply a deterministic pause so the 
counter has time to catch up - time.Sleep(25 * time.Millisecond) - close(done) + time.Sleep(20 * time.Millisecond) }() next := el @@ -305,7 +305,7 @@ FOR_LOOP: } case <-done: break FOR_LOOP - case <-time.After(10 * time.Second): + case <-time.After(2 * time.Second): t.Fatal("max execution time") } } @@ -326,7 +326,11 @@ FOR_LOOP2: if prev == nil { t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") } - case <-time.After(3 * time.Second): + if pushed == seen { + break FOR_LOOP2 + } + + case <-time.After(250 * time.Millisecond): break FOR_LOOP2 } } diff --git a/internal/libs/fail/fail.go b/internal/libs/fail/fail.go deleted file mode 100644 index 03a2ca6682..0000000000 --- a/internal/libs/fail/fail.go +++ /dev/null @@ -1,40 +0,0 @@ -package fail - -import ( - "fmt" - "os" - "strconv" -) - -func envSet() int { - callIndexToFailS := os.Getenv("FAIL_TEST_INDEX") - - if callIndexToFailS == "" { - return -1 - } - - var err error - callIndexToFail, err := strconv.Atoi(callIndexToFailS) - if err != nil { - return -1 - } - - return callIndexToFail -} - -// Fail when FAIL_TEST_INDEX == callIndex -var callIndex int // indexes Fail calls - -func Fail() { - callIndexToFail := envSet() - if callIndexToFail < 0 { - return - } - - if callIndex == callIndexToFail { - fmt.Printf("*** fail-test %d ***\n", callIndex) - os.Exit(1) - } - - callIndex++ -} diff --git a/internal/libs/flowrate/README.md b/internal/libs/flowrate/README.md deleted file mode 100644 index caed79aa33..0000000000 --- a/internal/libs/flowrate/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Data Flow Rate Control -====================== - -To download and install this package run: - -go get github.com/mxk/go-flowrate/flowrate - -The documentation is available at: - - diff --git a/internal/libs/flowrate/flowrate.go b/internal/libs/flowrate/flowrate.go index 522c46cc73..2a053805c2 100644 --- a/internal/libs/flowrate/flowrate.go +++ b/internal/libs/flowrate/flowrate.go @@ -8,14 +8,13 @@ package 
flowrate import ( "math" + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) // Monitor monitors and limits the transfer rate of a data stream. type Monitor struct { - mu tmsync.Mutex // Mutex guarding access to all internal fields + mu sync.Mutex // Mutex guarding access to all internal fields active bool // Flag indicating an active transfer start time.Duration // Transfer start time (clock() value) bytes int64 // Total number of bytes transferred diff --git a/internal/libs/flowrate/io.go b/internal/libs/flowrate/io.go deleted file mode 100644 index fbe0909725..0000000000 --- a/internal/libs/flowrate/io.go +++ /dev/null @@ -1,133 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "errors" - "io" -) - -// ErrLimit is returned by the Writer when a non-blocking write is short due to -// the transfer rate limit. -var ErrLimit = errors.New("flowrate: flow rate limit exceeded") - -// Limiter is implemented by the Reader and Writer to provide a consistent -// interface for monitoring and controlling data transfer. -type Limiter interface { - Done() int64 - Status() Status - SetTransferSize(bytes int64) - SetLimit(new int64) (old int64) - SetBlocking(new bool) (old bool) -} - -// Reader implements io.ReadCloser with a restriction on the rate of data -// transfer. -type Reader struct { - io.Reader // Data source - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be read due to the limit -} - -// NewReader restricts all Read operations on r to limit bytes per second. -func NewReader(r io.Reader, limit int64) *Reader { - return &Reader{r, New(0, 0), limit, true} -} - -// Read reads up to len(p) bytes into p without exceeding the current transfer -// rate limit. It returns (0, nil) immediately if r is non-blocking and no new -// bytes can be read at this time. 
-func (r *Reader) Read(p []byte) (n int, err error) { - p = p[:r.Limit(len(p), r.limit, r.block)] - if len(p) > 0 { - n, err = r.IO(r.Reader.Read(p)) - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (r *Reader) SetLimit(new int64) (old int64) { - old, r.limit = r.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Read call on a non-blocking reader returns immediately if no additional bytes -// may be read at this time due to the rate limit. -func (r *Reader) SetBlocking(new bool) (old bool) { - old, r.block = r.block, new - return -} - -// Close closes the underlying reader if it implements the io.Closer interface. -func (r *Reader) Close() error { - defer r.Done() - if c, ok := r.Reader.(io.Closer); ok { - return c.Close() - } - return nil -} - -// Writer implements io.WriteCloser with a restriction on the rate of data -// transfer. -type Writer struct { - io.Writer // Data destination - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be written due to the limit -} - -// NewWriter restricts all Write operations on w to limit bytes per second. The -// transfer rate and the default blocking behavior (true) can be changed -// directly on the returned *Writer. -func NewWriter(w io.Writer, limit int64) *Writer { - return &Writer{w, New(0, 0), limit, true} -} - -// Write writes len(p) bytes from p to the underlying data stream without -// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is -// non-blocking and no additional bytes can be written at this time. 
-func (w *Writer) Write(p []byte) (n int, err error) { - var c int - for len(p) > 0 && err == nil { - s := p[:w.Limit(len(p), w.limit, w.block)] - if len(s) > 0 { - c, err = w.IO(w.Writer.Write(s)) - } else { - return n, ErrLimit - } - p = p[c:] - n += c - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (w *Writer) SetLimit(new int64) (old int64) { - old, w.limit = w.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Write call on a non-blocking writer returns as soon as no additional bytes -// may be written at this time due to the rate limit. -func (w *Writer) SetBlocking(new bool) (old bool) { - old, w.block = w.block, new - return -} - -// Close closes the underlying writer if it implements the io.Closer interface. -func (w *Writer) Close() error { - defer w.Done() - if c, ok := w.Writer.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/internal/libs/flowrate/io_test.go b/internal/libs/flowrate/io_test.go deleted file mode 100644 index 4d7de417e4..0000000000 --- a/internal/libs/flowrate/io_test.go +++ /dev/null @@ -1,197 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "bytes" - "testing" - "time" -) - -const ( - _50ms = 50 * time.Millisecond - _100ms = 100 * time.Millisecond - _200ms = 200 * time.Millisecond - _300ms = 300 * time.Millisecond - _400ms = 400 * time.Millisecond - _500ms = 500 * time.Millisecond -) - -func nextStatus(m *Monitor) Status { - samples := m.samples - for i := 0; i < 30; i++ { - if s := m.Status(); s.Samples != samples { - return s - } - time.Sleep(5 * time.Millisecond) - } - return m.Status() -} - -func TestReader(t *testing.T) { - in := make([]byte, 100) - for i := range in { - in[i] = byte(i) - } - b := make([]byte, 100) - r := NewReader(bytes.NewReader(in), 100) - start := time.Now() - - // Make sure r implements Limiter - _ = 
Limiter(r) - - // 1st read of 10 bytes is performed immediately - if n, err := r.Read(b); n != 10 || err != nil { - t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - // No new Reads allowed in the current sample - r.SetBlocking(false) - if n, err := r.Read(b); n != 0 || err != nil { - t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - status := [6]Status{0: r.Status()} // No samples in the first status - - // 2nd read of 10 bytes blocks until the next sample - r.SetBlocking(true) - if n, err := r.Read(b[10:]); n != 10 || err != nil { - t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _100ms { - t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) - } - - status[1] = r.Status() // 1st sample - status[2] = nextStatus(r.Monitor) // 2nd sample - status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample - - if n := r.Done(); n != 20 { - t.Fatalf("r.Done() expected 20; got %v", n) - } - - status[4] = r.Status() - status[5] = nextStatus(r.Monitor) // Timeout - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true}, - {start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true}, - {start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true}, - {start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - } - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) - } - } - if !bytes.Equal(b[:20], in[:20]) { - 
t.Errorf("r.Read() input doesn't match output") - } -} - -func TestWriter(t *testing.T) { - b := make([]byte, 100) - for i := range b { - b[i] = byte(i) - } - w := NewWriter(&bytes.Buffer{}, 200) - start := time.Now() - - // Make sure w implements Limiter - _ = Limiter(w) - - // Non-blocking 20-byte write for the first sample returns ErrLimit - w.SetBlocking(false) - if n, err := w.Write(b); n != 20 || err != ErrLimit { - t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("w.Write(b) took too long (%v)", rt) - } - - // Blocking 80-byte write - w.SetBlocking(true) - if n, err := w.Write(b[20:]); n != 80 || err != nil { - t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _300ms { - // Explanation for `rt < _300ms` (as opposed to `< _400ms`) - // - // |<-- start | | - // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms - // sends: 20|20 |20 |20 |20# - // - // NOTE: The '#' symbol can thus happen before 400ms is up. - // Thus, we can only panic if rt < _300ms. - t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) - } - - w.SetTransferSize(100) - status := []Status{w.Status(), nextStatus(w.Monitor)} - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 80, 4, 200, 200, 200, 200, 20, _400ms, 0, _100ms, 80000, true}, - {start, 100, 5, 200, 200, 200, 200, 0, _500ms, _100ms, 0, 100000, true}, - } - - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) - } - } - if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { - t.Errorf("w.Write() input doesn't match output") - } -} - -const maxDeviationForDuration = 50 * time.Millisecond -const maxDeviationForRate int64 = 50 - -// statusesAreEqual returns true if s1 is equal to s2. 
Equality here means -// general equality of fields except for the duration and rates, which can -// drift due to unpredictable delays (e.g. thread wakes up 25ms after -// `time.Sleep` has ended). -func statusesAreEqual(s1 *Status, s2 *Status) bool { - if s1.Active == s2.Active && - s1.Start == s2.Start && - durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && - s1.Idle == s2.Idle && - s1.Bytes == s2.Bytes && - s1.Samples == s2.Samples && - ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && - ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && - ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && - ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && - s1.BytesRem == s2.BytesRem && - durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && - s1.Progress == s2.Progress { - return true - } - return false -} - -func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { - return d2-d1 <= maxDeviation -} - -func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { - sub := r1 - r2 - if sub < 0 { - sub = -sub - } - if sub <= maxDeviation { - return true - } - return false -} diff --git a/internal/libs/protoio/io_test.go b/internal/libs/protoio/io_test.go index a84b34c002..4420ad7863 100644 --- a/internal/libs/protoio/io_test.go +++ b/internal/libs/protoio/io_test.go @@ -44,7 +44,8 @@ import ( "github.com/tendermint/tendermint/internal/libs/protoio" ) -func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { +func iotest(t *testing.T, writer protoio.WriteCloser, reader protoio.ReadCloser) error { + t.Helper() varint := make([]byte, binary.MaxVarintLen64) size := 1000 msgs := make([]*test.NinOptNative, size) @@ -94,9 +95,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { } i++ } - if i != size { - panic("not enough messages read") - } + require.Equal(t, size, i, "messages read ≠ messages written") if err := 
reader.Close(); err != nil { return err } @@ -121,7 +120,7 @@ func TestVarintNormal(t *testing.T) { buf := newBuffer() writer := protoio.NewDelimitedWriter(buf) reader := protoio.NewDelimitedReader(buf, 1024*1024) - err := iotest(writer, reader) + err := iotest(t, writer, reader) require.NoError(t, err) require.True(t, buf.closed, "did not close buffer") } @@ -130,7 +129,7 @@ func TestVarintNoClose(t *testing.T) { buf := bytes.NewBuffer(nil) writer := protoio.NewDelimitedWriter(buf) reader := protoio.NewDelimitedReader(buf, 1024*1024) - err := iotest(writer, reader) + err := iotest(t, writer, reader) require.NoError(t, err) } @@ -139,7 +138,7 @@ func TestVarintMaxSize(t *testing.T) { buf := newBuffer() writer := protoio.NewDelimitedWriter(buf) reader := protoio.NewDelimitedReader(buf, 20) - err := iotest(writer, reader) + err := iotest(t, writer, reader) require.Error(t, err) } diff --git a/internal/libs/protoio/writer.go b/internal/libs/protoio/writer.go index d4c66798fa..93be1f8513 100644 --- a/internal/libs/protoio/writer.go +++ b/internal/libs/protoio/writer.go @@ -34,6 +34,7 @@ import ( "bytes" "encoding/binary" "io" + "sync" "github.com/gogo/protobuf/proto" ) @@ -90,11 +91,44 @@ func (w *varintWriter) Close() error { return nil } +func varintWrittenBytes(m marshaler, size int) ([]byte, error) { + buf := make([]byte, size+binary.MaxVarintLen64) + n := binary.PutUvarint(buf, uint64(size)) + nw, err := m.MarshalTo(buf[n:]) + if err != nil { + return nil, err + } + return buf[:n+nw], nil +} + +var bufPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + func MarshalDelimited(msg proto.Message) ([]byte, error) { - var buf bytes.Buffer - _, err := NewDelimitedWriter(&buf).WriteMsg(msg) + // The goal here is to write proto message as is knowning already if + // the exact size can be retrieved and if so just use that. 
+ if m, ok := msg.(marshaler); ok { + size, ok := getSize(msg) + if ok { + return varintWrittenBytes(m, size) + } + } + + // Otherwise, go down the route of using proto.Marshal, + // and use the buffer pool to retrieve a writer. + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, err := NewDelimitedWriter(buf).WriteMsg(msg) if err != nil { return nil, err } - return buf.Bytes(), nil + // Given that we are reusing buffers, we should + // make a copy of the returned bytes. + bytesCopy := make([]byte, buf.Len()) + copy(bytesCopy, buf.Bytes()) + return bytesCopy, nil } diff --git a/internal/libs/protoio/writer_test.go b/internal/libs/protoio/writer_test.go new file mode 100644 index 0000000000..69867f7334 --- /dev/null +++ b/internal/libs/protoio/writer_test.go @@ -0,0 +1,90 @@ +package protoio_test + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/libs/protoio" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +func aVote(t testing.TB) *types.Vote { + t.Helper() + var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z") + require.NoError(t, err) + + return &types.Vote{ + Type: tmproto.SignedMsgType(byte(tmproto.PrevoteType)), + Height: 12345, + Round: 2, + Timestamp: stamp, + BlockID: types.BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: types.PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + ValidatorIndex: 56789, + } +} + +type excludedMarshalTo struct { + msg proto.Message +} + +func (emt *excludedMarshalTo) ProtoMessage() {} +func (emt *excludedMarshalTo) String() string { + return emt.msg.String() +} +func 
(emt *excludedMarshalTo) Reset() { + emt.msg.Reset() +} +func (emt *excludedMarshalTo) Marshal() ([]byte, error) { + return proto.Marshal(emt.msg) +} + +var _ proto.Message = (*excludedMarshalTo)(nil) + +var sink interface{} + +func BenchmarkMarshalDelimitedWithMarshalTo(b *testing.B) { + msgs := []proto.Message{ + aVote(b).ToProto(), + } + benchmarkMarshalDelimited(b, msgs) +} + +func BenchmarkMarshalDelimitedNoMarshalTo(b *testing.B) { + msgs := []proto.Message{ + &excludedMarshalTo{aVote(b).ToProto()}, + } + benchmarkMarshalDelimited(b, msgs) +} + +func benchmarkMarshalDelimited(b *testing.B, msgs []proto.Message) { + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, msg := range msgs { + blob, err := protoio.MarshalDelimited(msg) + require.Nil(b, err) + sink = blob + } + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) +} diff --git a/internal/libs/queue/queue.go b/internal/libs/queue/queue.go new file mode 100644 index 0000000000..7b4199504b --- /dev/null +++ b/internal/libs/queue/queue.go @@ -0,0 +1,232 @@ +// Package queue implements a dynamic FIFO queue with a fixed upper bound +// and a flexible quota mechanism to handle bursty load. +package queue + +import ( + "context" + "errors" + "sync" +) + +var ( + // ErrQueueFull is returned by the Add method of a queue when the queue has + // reached its hard capacity limit. + ErrQueueFull = errors.New("queue is full") + + // ErrNoCredit is returned by the Add method of a queue when the queue has + // exceeded its soft quota and there is insufficient burst credit. + ErrNoCredit = errors.New("insufficient burst credit") + + // ErrQueueClosed is returned by the Add method of a closed queue, and by + // the Wait method of a closed empty queue. + ErrQueueClosed = errors.New("queue is closed") + + // Sentinel errors reported by the New constructor. 
+ errHardLimit = errors.New("hard limit must be > 0 and ≥ soft quota") + errBurstCredit = errors.New("burst credit must be non-negative") +) + +// A Queue is a limited-capacity FIFO queue of arbitrary data items. +// +// A queue has a soft quota and a hard limit on the number of items that may be +// contained in the queue. Adding items in excess of the hard limit will fail +// unconditionally. +// +// For items in excess of the soft quota, a credit system applies: Each queue +// maintains a burst credit score. Adding an item in excess of the soft quota +// costs 1 unit of burst credit. If there is not enough burst credit, the add +// will fail. +// +// The initial burst credit is assigned when the queue is constructed. Removing +// items from the queue adds additional credit if the resulting queue length is +// less than the current soft quota. Burst credit is capped by the hard limit. +// +// A Queue is safe for concurrent use by multiple goroutines. +type Queue struct { + mu sync.Mutex // protects the fields below + + softQuota int // adjusted dynamically (see Add, Remove) + hardLimit int // fixed for the lifespan of the queue + queueLen int // number of entries in the queue list + credit float64 // current burst credit + + closed bool + nempty *sync.Cond + back *entry + front *entry + + // The queue is singly-linked. Front points to the sentinel and back points + // to the newest entry. The oldest entry is front.link if it exists. +} + +// New constructs a new empty queue with the specified options. It reports an +// error if any of the option values are invalid. 
+func New(opts Options) (*Queue, error) { + if opts.HardLimit <= 0 || opts.HardLimit < opts.SoftQuota { + return nil, errHardLimit + } + if opts.BurstCredit < 0 { + return nil, errBurstCredit + } + if opts.SoftQuota <= 0 { + opts.SoftQuota = opts.HardLimit + } + if opts.BurstCredit == 0 { + opts.BurstCredit = float64(opts.SoftQuota) + } + sentinel := new(entry) + q := &Queue{ + softQuota: opts.SoftQuota, + hardLimit: opts.HardLimit, + credit: opts.BurstCredit, + back: sentinel, + front: sentinel, + } + q.nempty = sync.NewCond(&q.mu) + return q, nil +} + +// Add adds item to the back of the queue. It reports an error and does not +// enqueue the item if the queue is full or closed, or if it exceeds its soft +// quota and there is not enough burst credit. +func (q *Queue) Add(item interface{}) error { + q.mu.Lock() + defer q.mu.Unlock() + + if q.closed { + return ErrQueueClosed + } + + if q.queueLen >= q.softQuota { + if q.queueLen == q.hardLimit { + return ErrQueueFull + } else if q.credit < 1 { + return ErrNoCredit + } + + // Successfully exceeding the soft quota deducts burst credit and raises + // the soft quota. This has the effect of reducing the credit cap and the + // amount of credit given for removing items to better approximate the + // rate at which the consumer is servicing the queue. + q.credit-- + q.softQuota = q.queueLen + 1 + } + e := &entry{item: item} + q.back.link = e + q.back = e + q.queueLen++ + if q.queueLen == 1 { // was empty + q.nempty.Signal() + } + return nil +} + +// Remove removes and returns the frontmost (oldest) item in the queue and +// reports whether an item was available. If the queue is empty, Remove +// returns nil, false. +func (q *Queue) Remove() (interface{}, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + if q.queueLen == 0 { + return nil, false + } + return q.popFront(), true +} + +// Wait blocks until q is non-empty or closed, and then returns the frontmost +// (oldest) item from the queue. 
If ctx ends before an item is available, Wait +// returns a nil value and a context error. If the queue is closed while it is +// still empty, Wait returns nil, ErrQueueClosed. +func (q *Queue) Wait(ctx context.Context) (interface{}, error) { + // If the context terminates, wake the waiter. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { <-ctx.Done(); q.nempty.Broadcast() }() + + q.mu.Lock() + defer q.mu.Unlock() + + for q.queueLen == 0 { + if q.closed { + return nil, ErrQueueClosed + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + q.nempty.Wait() + } + } + return q.popFront(), nil +} + +// Close closes the queue. After closing, any further Add calls will report an +// error, but items that were added to the queue prior to closing will still be +// available for Remove and Wait. Wait will report an error without blocking if +// it is called on a closed, empty queue. +func (q *Queue) Close() error { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.nempty.Broadcast() + return nil +} + +// popFront removes the frontmost item of q and returns its value after +// updating quota and credit settings. +// +// Preconditions: The caller holds q.mu and q is not empty. +func (q *Queue) popFront() interface{} { + e := q.front.link + q.front.link = e.link + if e == q.back { + q.back = q.front + } + q.queueLen-- + + if q.queueLen < q.softQuota { + // Successfully removing items from the queue below half the soft quota + // lowers the soft quota. This has the effect of increasing the credit cap + // and the amount of credit given for removing items to better approximate + // the rate at which the consumer is servicing the queue. + if q.softQuota > 1 && q.queueLen < q.softQuota/2 { + q.softQuota-- + } + + // Give credit for being below the soft quota. Note we do this after + // adjusting the quota so the credit reflects the item we just removed. 
+ q.credit += float64(q.softQuota-q.queueLen) / float64(q.softQuota) + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + q.credit = cap + } + } + + return e.item +} + +// Options are the initial settings for a Queue. +type Options struct { + // The maximum number of items the queue will ever be permitted to hold. + // This value must be positive, and greater than or equal to SoftQuota. The + // hard limit is fixed and does not change as the queue is used. + // + // The hard limit should be chosen to exceed the largest burst size expected + // under normal operating conditions. + HardLimit int + + // The initial expected maximum number of items the queue should contain on + // an average workload. If this value is zero, it is initialized to the hard + // limit. The soft quota is adjusted from the initial value dynamically as + // the queue is used. + SoftQuota int + + // The initial burst credit score. This value must be greater than or equal + // to zero. If it is zero, the soft quota is used. 
+ BurstCredit float64 +} + +type entry struct { + item interface{} + link *entry +} diff --git a/internal/libs/queue/queue_test.go b/internal/libs/queue/queue_test.go new file mode 100644 index 0000000000..204c18653c --- /dev/null +++ b/internal/libs/queue/queue_test.go @@ -0,0 +1,194 @@ +package queue + +import ( + "context" + "testing" + "time" +) + +func TestNew(t *testing.T) { + tests := []struct { + desc string + opts Options + want error + }{ + {"empty options", Options{}, errHardLimit}, + {"zero limit negative quota", Options{SoftQuota: -1}, errHardLimit}, + {"zero limit and quota", Options{SoftQuota: 0}, errHardLimit}, + {"zero limit", Options{SoftQuota: 1, HardLimit: 0}, errHardLimit}, + {"limit less than quota", Options{SoftQuota: 5, HardLimit: 3}, errHardLimit}, + {"negative credit", Options{SoftQuota: 1, HardLimit: 1, BurstCredit: -6}, errBurstCredit}, + {"valid default credit", Options{SoftQuota: 1, HardLimit: 2, BurstCredit: 0}, nil}, + {"valid explicit credit", Options{SoftQuota: 1, HardLimit: 5, BurstCredit: 10}, nil}, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + got, err := New(test.opts) + if err != test.want { + t.Errorf("New(%+v): got (%+v, %v), want err=%v", test.opts, got, err, test.want) + } + }) + } +} + +type testQueue struct { + t *testing.T + *Queue +} + +func (q testQueue) mustAdd(item string) { + q.t.Helper() + if err := q.Add(item); err != nil { + q.t.Errorf("Add(%q): unexpected error: %v", item, err) + } +} + +func (q testQueue) mustRemove(want string) { + q.t.Helper() + got, ok := q.Remove() + if !ok { + q.t.Error("Remove: queue is empty") + } else if got.(string) != want { + q.t.Errorf("Remove: got %q, want %q", got, want) + } +} + +func mustQueue(t *testing.T, opts Options) testQueue { + t.Helper() + + q, err := New(opts) + if err != nil { + t.Fatalf("New(%+v): unexpected error: %v", opts, err) + } + return testQueue{t: t, Queue: q} +} + +func TestHardLimit(t *testing.T) { + q := mustQueue(t, 
Options{SoftQuota: 1, HardLimit: 1}) + q.mustAdd("foo") + if err := q.Add("bar"); err != ErrQueueFull { + t.Errorf("Add: got err=%v, want %v", err, ErrQueueFull) + } +} + +func TestSoftQuota(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 4}) + q.mustAdd("foo") + q.mustAdd("bar") + if err := q.Add("baz"); err != ErrNoCredit { + t.Errorf("Add: got err=%v, want %v", err, ErrNoCredit) + } +} + +func TestBurstCredit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 5}) + q.mustAdd("foo") + q.mustAdd("bar") + + // We should still have all our initial credit. + if q.credit < 2 { + t.Errorf("Wrong credit: got %f, want ≥ 2", q.credit) + } + + // Removing an item below soft quota should increase our credit. + q.mustRemove("foo") + if q.credit <= 2 { + t.Errorf("wrong credit: got %f, want > 2", q.credit) + } + + // Credit should be capped by the hard limit. + q.mustRemove("bar") + q.mustAdd("baz") + q.mustRemove("baz") + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + t.Errorf("Wrong credit: got %f, want ≤ %f", q.credit, cap) + } +} + +func TestClose(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 10}) + q.mustAdd("alpha") + q.mustAdd("bravo") + q.mustAdd("charlie") + q.Close() + + // After closing the queue, subsequent writes should fail. + if err := q.Add("foxtrot"); err == nil { + t.Error("Add should have failed after Close") + } + + // However, the remaining contents of the queue should still work. + q.mustRemove("alpha") + q.mustRemove("bravo") + q.mustRemove("charlie") +} + +func TestWait(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 2}) + + // A wait on an empty queue should time out. 
+ t.Run("WaitTimeout", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + got, err := q.Wait(ctx) + if err == nil { + t.Errorf("Wait: got %v, want error", got) + } else { + t.Logf("Wait correctly failed: %v", err) + } + }) + + // A wait on a non-empty queue should report an item. + t.Run("WaitNonEmpty", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const input = "figgy pudding" + q.mustAdd(input) + + got, err := q.Wait(ctx) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }) + + // Wait should block until an item arrives. + t.Run("WaitOnEmpty", func(t *testing.T) { + const input = "fleet footed kittens" + + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(ctx) + if err != nil { + t.Errorf("Wait: unexpected error: %w", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }() + + q.mustAdd(input) + <-done + }) + + // Closing the queue unblocks a wait. + t.Run("UnblockOnClose", func(t *testing.T) { + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(ctx) + if err != ErrQueueClosed { + t.Errorf("Wait: got (%v, %v), want %v", got, err, ErrQueueClosed) + } + }() + + q.Close() + <-done + }) +} diff --git a/internal/libs/sync/deadlock.go b/internal/libs/sync/deadlock.go deleted file mode 100644 index 21b5130ba4..0000000000 --- a/internal/libs/sync/deadlock.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build deadlock -// +build deadlock - -package sync - -import ( - deadlock "github.com/sasha-s/go-deadlock" -) - -// A Mutex is a mutual exclusion lock. -type Mutex struct { - deadlock.Mutex -} - -// An RWMutex is a reader/writer mutual exclusion lock. 
-type RWMutex struct { - deadlock.RWMutex -} diff --git a/internal/libs/sync/sync.go b/internal/libs/sync/sync.go deleted file mode 100644 index c6e7101c60..0000000000 --- a/internal/libs/sync/sync.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !deadlock -// +build !deadlock - -package sync - -import "sync" - -// A Mutex is a mutual exclusion lock. -type Mutex struct { - sync.Mutex -} - -// An RWMutex is a reader/writer mutual exclusion lock. -type RWMutex struct { - sync.RWMutex -} diff --git a/internal/libs/tempfile/tempfile.go b/internal/libs/tempfile/tempfile.go index 0c594bb20f..e30d5a8c60 100644 --- a/internal/libs/tempfile/tempfile.go +++ b/internal/libs/tempfile/tempfile.go @@ -7,9 +7,8 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) const ( @@ -32,7 +31,7 @@ const ( var ( atomicWriteFileRand uint64 - atomicWriteFileRandMu tmsync.Mutex + atomicWriteFileRandMu sync.Mutex ) func writeFileRandReseed() uint64 { diff --git a/internal/libs/tempfile/tempfile_test.go b/internal/libs/tempfile/tempfile_test.go index 5650fe7208..5c38f9736c 100644 --- a/internal/libs/tempfile/tempfile_test.go +++ b/internal/libs/tempfile/tempfile_test.go @@ -5,10 +5,9 @@ package tempfile import ( "bytes" "fmt" - "io/ioutil" mrand "math/rand" "os" - testing "testing" + "testing" "github.com/stretchr/testify/require" @@ -22,13 +21,13 @@ func TestWriteFileAtomic(t *testing.T) { perm os.FileMode = 0600 ) - f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + f, err := os.CreateTemp("/tmp", "write-atomic-test-") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) - if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil { + if err = os.WriteFile(f.Name(), old, 0600); err != nil { t.Fatal(err) } @@ -36,7 +35,7 @@ func TestWriteFileAtomic(t *testing.T) { t.Fatal(err) } - rData, err := ioutil.ReadFile(f.Name()) + rData, err := os.ReadFile(f.Name()) if err != nil { t.Fatal(err) } @@ -81,11 +80,11 @@ 
func TestWriteFileAtomicDuplicateFile(t *testing.T) { err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) require.NoError(t, err) // Check that the first atomic file was untouched - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten") // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") @@ -115,7 +114,7 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) - require.Nil(t, err) + require.NoError(t, err) _, err = f.WriteString(fmt.Sprintf(testString, i)) require.NoError(t, err) defer os.Remove(fname) @@ -132,14 +131,14 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - firstAtomicFileBytes, err := ioutil.ReadFile(fname) - require.Nil(t, err, "Error reading first atomic file") + firstAtomicFileBytes, err := os.ReadFile(fname) + require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes, "atomic write file %d was overwritten", i) } // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) - require.Nil(t, err, "Error reading resultant file") + resultantFileBytes, err := os.ReadFile(fileToWrite) + require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), 
resultantFileBytes, "Written file had incorrect bytes") } diff --git a/internal/libs/timer/throttle_timer.go b/internal/libs/timer/throttle_timer.go index 3f21e3cc04..7bf86e80c1 100644 --- a/internal/libs/timer/throttle_timer.go +++ b/internal/libs/timer/throttle_timer.go @@ -1,9 +1,8 @@ package timer import ( + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) /* @@ -18,7 +17,7 @@ type ThrottleTimer struct { quit chan struct{} dur time.Duration - mtx tmsync.Mutex + mtx sync.Mutex timer *time.Timer isSet bool } @@ -56,13 +55,6 @@ func (t *ThrottleTimer) Set() { } } -func (t *ThrottleTimer) Unset() { - t.mtx.Lock() - defer t.mtx.Unlock() - t.isSet = false - t.timer.Stop() -} - // For ease of .Stop()'ing services before .Start()'ing them, // we ignore .Stop()'s on nil ThrottleTimers func (t *ThrottleTimer) Stop() bool { diff --git a/internal/libs/timer/throttle_timer_test.go b/internal/libs/timer/throttle_timer_test.go index a56dcadfd0..7ea392c3a4 100644 --- a/internal/libs/timer/throttle_timer_test.go +++ b/internal/libs/timer/throttle_timer_test.go @@ -1,19 +1,18 @@ package timer import ( + "sync" "testing" "time" // make govet noshadow happy... asrt "github.com/stretchr/testify/assert" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) type thCounter struct { input chan struct{} - mtx tmsync.Mutex + mtx sync.Mutex count int } diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 3cd45d2bc5..c69fc80dd4 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -2,8 +2,8 @@ package mempool import ( "container/list" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -29,7 +29,7 @@ var _ TxCache = (*LRUTxCache)(nil) // LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache // only stores the hash of the raw transaction. 
type LRUTxCache struct { - mtx tmsync.Mutex + mtx sync.Mutex size int cacheMap map[types.TxKey]*list.Element list *list.List diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 656f5b74cd..3788afcbc9 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -2,13 +2,13 @@ package mempool import ( "fmt" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) type IDs struct { - mtx tmsync.RWMutex + mtx sync.RWMutex peerMap map[types.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 6e3955dc3b..bbffcd59dc 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -1,143 +1,858 @@ package mempool import ( + "bytes" "context" + "errors" "fmt" - "math" + "reflect" + "sync" + "sync/atomic" + "time" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/clist" + "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) -const ( - MempoolChannel = p2p.ChannelID(0x30) +var _ Mempool = (*TxMempool)(nil) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind - PeerCatchupSleepIntervalMS = 100 +// TxMempoolOption sets an optional parameter on the TxMempool. +type TxMempoolOption func(*TxMempool) - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) - UnknownPeerID uint16 = 0 +// TxMempool defines a prioritized mempool data structure used by the v1 mempool +// reactor. 
It keeps a thread-safe priority queue of transactions that is used +// when a block proposer constructs a block and a thread-safe linked-list that +// is used to gossip transactions to peers in a FIFO manner. +type TxMempool struct { + logger log.Logger + metrics *Metrics + config *config.MempoolConfig + proxyAppConn proxy.AppConnMempool - MaxActiveIDs = math.MaxUint16 -) + // txsAvailable fires once for each height when the mempool is not empty + txsAvailable chan struct{} + notifiedTxsAvailable bool -// Mempool defines the mempool interface. -// -// Updates to the mempool need to be synchronized with committing a block so -// applications can reset their transient state on Commit. -type Mempool interface { - // CheckTx executes a new transaction against the application to determine - // its validity and whether it should be added to the mempool. - CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - - // RemoveTxByKey removes a transaction, identified by its key, - // from the mempool. - RemoveTxByKey(txKey types.TxKey) error - - // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes - // bytes total with the condition that the total gasWanted must be less than - // maxGas. - // - // If both maxes are negative, there is no cap on the size of all returned - // transactions (~ all available transactions). - ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs + // height defines the last block height process during Update() + height int64 - // ReapMaxTxs reaps up to max transactions from the mempool. If max is - // negative, there is no cap on the size of all returned transactions - // (~ all available transactions). - ReapMaxTxs(max int) types.Txs + // sizeBytes defines the total size of the mempool (sum of all tx bytes) + sizeBytes int64 - // Lock locks the mempool. The consensus must be able to hold lock to safely - // update. 
- Lock() + // cache defines a fixed-size cache of already seen transactions as this + // reduces pressure on the proxyApp. + cache TxCache - // Unlock unlocks the mempool. - Unlock() + // txStore defines the main storage of valid transactions. Indexes are built + // on top of this store. + txStore *TxStore - // Update informs the mempool that the given txs were committed and can be - // discarded. - // - // NOTE: - // 1. This should be called *after* block is committed by consensus. - // 2. Lock/Unlock must be managed by the caller. - Update( - blockHeight int64, - blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - newPreFn PreCheckFunc, - newPostFn PostCheckFunc, - ) error - - // FlushAppConn flushes the mempool connection to ensure async callback calls - // are done, e.g. from CheckTx. + // gossipIndex defines the gossiping index of valid transactions via a + // thread-safe linked-list. We also use the gossip index as a cursor for + // rechecking transactions already in the mempool. + gossipIndex *clist.CList + + // recheckCursor and recheckEnd are used as cursors based on the gossip index + // to recheck transactions that are already in the mempool. Iteration is not + // thread-safe and transaction may be mutated in serial order. // - // NOTE: - // 1. Lock/Unlock must be managed by caller. - FlushAppConn() error + // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for + // iterator and cursor management when rechecking transactions. If the gossip + // index changes or is removed in a future refactor, this will have to be + // refactored. Instead, we should consider just keeping a slice of a snapshot + // of the mempool's current transactions during Update and an integer cursor + // into that slice. This, however, requires additional O(n) space complexity. 
+ recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here - // Flush removes all transactions from the mempool and caches. - Flush() + // priorityIndex defines the priority index of valid transactions via a + // thread-safe priority queue. + priorityIndex *TxPriorityQueue - // TxsAvailable returns a channel which fires once for every height, and only - // when transactions are available in the mempool. - // - // NOTE: - // 1. The returned channel may be nil if EnableTxsAvailable was not called. - TxsAvailable() <-chan struct{} + // heightIndex defines a height-based, in ascending order, transaction index. + // i.e. older transactions are first. + heightIndex *WrappedTxList + + // timestampIndex defines a timestamp-based, in ascending order, transaction + // index. i.e. older transactions are first. + timestampIndex *WrappedTxList - // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will - // trigger once every height when transactions are available. - EnableTxsAvailable() + // A read/write lock is used to safe guard updates, insertions and deletions + // from the mempool. A read-lock is implicitly acquired when executing CheckTx, + // however, a caller must explicitly grab a write-lock via Lock when updating + // the mempool via Update(). + mtx sync.RWMutex + preCheck PreCheckFunc + postCheck PostCheckFunc +} + +func NewTxMempool( + logger log.Logger, + cfg *config.MempoolConfig, + proxyAppConn proxy.AppConnMempool, + height int64, + options ...TxMempoolOption, +) *TxMempool { - // Size returns the number of transactions in the mempool. 
- Size() int + txmp := &TxMempool{ + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + height: height, + cache: NopTxCache{}, + metrics: NopMetrics(), + txStore: NewTxStore(), + gossipIndex: clist.New(), + priorityIndex: NewTxPriorityQueue(), + heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.height >= wtx2.height + }), + timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) + }), + } - // SizeBytes returns the total size of all txs in the mempool. - SizeBytes() int64 + if cfg.CacheSize > 0 { + txmp.cache = NewLRUTxCache(cfg.CacheSize) + } + + proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) + + for _, opt := range options { + opt(txmp) + } + + return txmp } -// PreCheckFunc is an optional filter executed before CheckTx and rejects -// transaction if false is returned. An example would be to ensure that a -// transaction doesn't exceeded the block size. -type PreCheckFunc func(types.Tx) error +// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) +// returns an error. This is executed before CheckTx. It only applies to the +// first created block. After that, Update() overwrites the existing value. +func WithPreCheck(f PreCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.preCheck = f } +} -// PostCheckFunc is an optional filter executed after CheckTx and rejects -// transaction if false is returned. An example would be to ensure a -// transaction doesn't require more gas than available for the block. -type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +// WithPostCheck sets a filter for the mempool to reject a transaction if +// f(tx, resp) returns an error. This is executed after CheckTx. It only applies +// to the first created block. After that, Update overwrites the existing value. 
+func WithPostCheck(f PostCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.postCheck = f } +} -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal -// to the expected maxBytes. -func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { - return func(tx types.Tx) error { - txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) +// WithMetrics sets the mempool's metrics collector. +func WithMetrics(metrics *Metrics) TxMempoolOption { + return func(txmp *TxMempool) { txmp.metrics = metrics } +} - if txSize > maxBytes { - return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) +// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly +// release the lock when finished. +func (txmp *TxMempool) Lock() { + txmp.mtx.Lock() +} + +// Unlock releases a write-lock on the mempool. +func (txmp *TxMempool) Unlock() { + txmp.mtx.Unlock() +} + +// Size returns the number of valid transactions in the mempool. It is +// thread-safe. +func (txmp *TxMempool) Size() int { + return txmp.txStore.Size() +} + +// SizeBytes return the total sum in bytes of all the valid transactions in the +// mempool. It is thread-safe. +func (txmp *TxMempool) SizeBytes() int64 { + return atomic.LoadInt64(&txmp.sizeBytes) +} + +// FlushAppConn executes FlushSync on the mempool's proxyAppConn. +// +// NOTE: The caller must obtain a write-lock prior to execution. +func (txmp *TxMempool) FlushAppConn(ctx context.Context) error { + return txmp.proxyAppConn.Flush(ctx) +} + +// WaitForNextTx returns a blocking channel that will be closed when the next +// valid transaction is available to gossip. It is thread-safe. +func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { + return txmp.gossipIndex.WaitChan() +} + +// NextGossipTx returns the next valid transaction to gossip. A caller must wait +// for WaitForNextTx to signal a transaction is available to gossip first. It is +// thread-safe. 
+func (txmp *TxMempool) NextGossipTx() *clist.CElement { + return txmp.gossipIndex.Front() +} + +// EnableTxsAvailable enables the mempool to trigger events when transactions +// are available on a block by block basis. +func (txmp *TxMempool) EnableTxsAvailable() { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + + txmp.txsAvailable = make(chan struct{}, 1) +} + +// TxsAvailable returns a channel which fires once for every height, and only +// when transactions are available in the mempool. It is thread-safe. +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { + return txmp.txsAvailable +} + +// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires +// a read-lock attempts to execute the application's CheckTx ABCI method via +// CheckTxAsync. We return an error if any of the following happen: +// +// - The CheckTxAsync execution fails. +// - The transaction already exists in the cache and we've already received the +// transaction from the peer. Otherwise, if it solely exists in the cache, we +// return nil. +// - The transaction size exceeds the maximum transaction size as defined by the +// configuration provided to the mempool. +// - The transaction fails Pre-Check (if it is defined). +// - The proxyAppConn fails, e.g. the buffer is full. +// +// If the mempool is full, we still execute CheckTx and attempt to find a lower +// priority transaction to evict. If such a transaction exists, we remove the +// lower priority transaction and add the new one with higher priority. +// +// NOTE: +// - The applications' CheckTx implementation may panic. +// - The caller is not to explicitly require any locks for executing CheckTx. 
+func (txmp *TxMempool) CheckTx( + ctx context.Context, + tx types.Tx, + cb func(*abci.Response), + txInfo TxInfo, +) error { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + if txSize := len(tx); txSize > txmp.config.MaxTxBytes { + return types.ErrTxTooLarge{ + Max: txmp.config.MaxTxBytes, + Actual: txSize, } + } + + if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return types.ErrPreCheck{Reason: err} + } + } + + if err := txmp.proxyAppConn.Error(); err != nil { + return err + } + + txHash := tx.Key() + + // We add the transaction to the mempool's cache and if the + // transaction is already present in the cache, i.e. false is returned, then we + // check if we've seen this transaction and error if we have. + if !txmp.cache.Push(tx) { + txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) + return types.ErrTxInCache + } + + reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) + if err != nil { + txmp.cache.Remove(tx) + return err + } + reqRes.SetCallback(func(res *abci.Response) { + if txmp.recheckCursor != nil { + panic("recheck cursor is non-nil in CheckTx callback") + } + + wtx := &WrappedTx{ + tx: tx, + hash: txHash, + timestamp: time.Now().UTC(), + height: txmp.height, + } + txmp.initTxCallback(wtx, res, txInfo) + + if cb != nil { + cb(res) + } + }) + + return nil +} + +func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + txmp.Lock() + defer txmp.Unlock() + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { + txmp.removeTx(wtx, false) return nil } + + return errors.New("transaction not found") +} + +// Flush empties the mempool. It acquires a read-lock, fetches all the +// transactions currently in the transaction store and removes each transaction +// from the store and all indexes and finally resets the cache. +// +// NOTE: +// - Flushing the mempool may leave the mempool in an inconsistent state. 
+func (txmp *TxMempool) Flush() { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + txmp.heightIndex.Reset() + txmp.timestampIndex.Reset() + + for _, wtx := range txmp.txStore.GetAllTxs() { + txmp.removeTx(wtx, false) + } + + atomic.SwapInt64(&txmp.sizeBytes, 0) + txmp.cache.Reset() } -// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed -// maxGas. Returns nil if maxGas is -1. -func PostCheckMaxGas(maxGas int64) PostCheckFunc { - return func(tx types.Tx, res *abci.ResponseCheckTx) error { - if maxGas == -1 { - return nil +// ReapMaxBytesMaxGas returns a list of transactions within the provided size +// and gas constraints. Transaction are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + var ( + totalGas int64 + totalSize int64 + ) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) + defer func() { + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) } - if res.GasWanted < 0 { - return fmt.Errorf("gas wanted %d is negative", - res.GasWanted) + }() + + txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) + for txmp.priorityIndex.NumTxs() > 0 { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) + + // Ensure we have capacity for the transaction with respect to the + // transaction size. 
+ if maxBytes > -1 && totalSize+size > maxBytes { + return txs[:len(txs)-1] } - if res.GasWanted > maxGas { - return fmt.Errorf("gas wanted %d is greater than max gas %d", - res.GasWanted, maxGas) + + totalSize += size + + // ensure we have capacity for the transaction with respect to total gas + gas := totalGas + wtx.gasWanted + if maxGas > -1 && gas > maxGas { + return txs[:len(txs)-1] } - return nil + totalGas = gas + } + + return txs +} + +// ReapMaxTxs returns a list of transactions within the provided number of +// transactions bound. Transaction are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + numTxs := txmp.priorityIndex.NumTxs() + if max < 0 { + max = numTxs + } + + cap := tmmath.MinInt(numTxs, max) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, cap) + txs := make([]types.Tx, 0, cap) + for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + } + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + return txs +} + +// Update iterates over all the transactions provided by the block producer, +// removes them from the cache (if applicable), and removes +// the transactions from the main transaction store and associated indexes. +// If there are transactions remaining in the mempool, we initiate a +// re-CheckTx for them (if applicable), otherwise, we notify the caller more +// transactions are available. +// +// NOTE: +// - The caller must explicitly acquire a write-lock. 
+func (txmp *TxMempool) Update( + ctx context.Context, + blockHeight int64, + blockTxs types.Txs, + deliverTxResponses []*abci.ResponseDeliverTx, + newPreFn PreCheckFunc, + newPostFn PostCheckFunc, +) error { + + txmp.height = blockHeight + txmp.notifiedTxsAvailable = false + + if newPreFn != nil { + txmp.preCheck = newPreFn + } + if newPostFn != nil { + txmp.postCheck = newPostFn + } + + for i, tx := range blockTxs { + if deliverTxResponses[i].Code == abci.CodeTypeOK { + // add the valid committed transaction to the cache (if missing) + _ = txmp.cache.Push(tx) + } else if !txmp.config.KeepInvalidTxsInCache { + // allow invalid transactions to be re-submitted + txmp.cache.Remove(tx) + } + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { + txmp.removeTx(wtx, false) + } + } + + txmp.purgeExpiredTxs(blockHeight) + + // If there any uncommitted transactions left in the mempool, we either + // initiate re-CheckTx per remaining transaction or notify that remaining + // transactions are left. + if txmp.Size() > 0 { + if txmp.config.Recheck { + txmp.logger.Debug( + "executing re-CheckTx for all remaining transactions", + "num_txs", txmp.Size(), + "height", blockHeight, + ) + txmp.updateReCheckTxs(ctx) + } else { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) + return nil +} + +// initTxCallback is the callback invoked for a new unique transaction after CheckTx +// has been executed by the ABCI application for the first time on that transaction. +// CheckTx can be called again for the same transaction later when re-checking; +// however, this callback will not be called. +// +// initTxCallback runs after the ABCI application executes CheckTx. +// It runs the postCheck hook if one is defined on the mempool. +// If the CheckTx response response code is not OK, or if the postCheck hook +// reports an error, the transaction is rejected. 
Otherwise, we attempt to insert +// the transaction into the mempool. +// +// When inserting a transaction, we first check if there is sufficient capacity. +// If there is, the transaction is added to the txStore and all indexes. +// Otherwise, if the mempool is full, we attempt to find a lower priority transaction +// to evict in place of the new incoming transaction. If no such transaction exists, +// the new incoming transaction is rejected. +// +// NOTE: +// - An explicit lock is NOT required. +func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo TxInfo) { + checkTxRes, ok := res.Value.(*abci.Response_CheckTx) + if !ok { + return + } + + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) + } + + if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK { + // ignore bad transactions + txmp.logger.Info( + "rejected bad transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "peer_id", txInfo.SenderNodeID, + "code", checkTxRes.CheckTx.Code, + "post_check_err", err, + ) + + txmp.metrics.FailedTxs.Add(1) + + if !txmp.config.KeepInvalidTxsInCache { + txmp.cache.Remove(wtx.tx) + } + if err != nil { + checkTxRes.CheckTx.MempoolError = err.Error() + } + return + } + + sender := checkTxRes.CheckTx.Sender + priority := checkTxRes.CheckTx.Priority + + if len(sender) > 0 { + if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { + txmp.logger.Error( + "rejected incoming good transaction; tx already exists for sender", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "sender", sender, + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + } + + if err := txmp.canAddTx(wtx); err != nil { + evictTxs := txmp.priorityIndex.GetEvictableTxs( + priority, + int64(wtx.Size()), + txmp.SizeBytes(), + txmp.config.MaxTxsBytes, + ) + if len(evictTxs) == 0 { + // No room for the new incoming transaction so we just remove it from + // the cache. 
+ txmp.cache.Remove(wtx.tx) + txmp.logger.Error( + "rejected incoming good transaction; mempool full", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err.Error(), + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + + // evict an existing transaction(s) + // + // NOTE: + // - The transaction, toEvict, can be removed while a concurrent + // reCheckTx callback is being executed for the same transaction. + for _, toEvict := range evictTxs { + txmp.removeTx(toEvict, true) + txmp.logger.Debug( + "evicted existing good transaction; mempool full", + "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), + "old_priority", toEvict.priority, + "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "new_priority", wtx.priority, + ) + txmp.metrics.EvictedTxs.Add(1) + } + } + + wtx.gasWanted = checkTxRes.CheckTx.GasWanted + wtx.priority = priority + wtx.sender = sender + wtx.peers = map[uint16]struct{}{ + txInfo.SenderID: {}, + } + + txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) + txmp.metrics.Size.Set(float64(txmp.Size())) + + txmp.insertTx(wtx) + txmp.logger.Debug( + "inserted good transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "height", txmp.height, + "num_txs", txmp.Size(), + ) + txmp.notifyTxsAvailable() +} + +// defaultTxCallback is the CheckTx application callback used when a transaction +// is being re-checked (if re-checking is enabled). The caller must hold a mempool +// write-lock (via Lock()) and when executing Update(), if the mempool is non-empty +// and Recheck is enabled, then all remaining transactions will be rechecked via +// CheckTxAsync. The order transactions are rechecked must be the same as the +// order in which this callback is called. 
+func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) { + if txmp.recheckCursor == nil { + return + } + + txmp.metrics.RecheckTimes.Add(1) + + checkTxRes, ok := res.Value.(*abci.Response_CheckTx) + if !ok { + txmp.logger.Error("received incorrect type in mempool callback", + "expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(), + "got", reflect.TypeOf(res.Value).Name(), + ) + return + } + tx := req.GetCheckTx().Tx + wtx := txmp.recheckCursor.Value.(*WrappedTx) + + // Search through the remaining list of tx to recheck for a transaction that matches + // the one we received from the ABCI application. + for { + if bytes.Equal(tx, wtx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks. + break + } + + txmp.logger.Error( + "re-CheckTx transaction mismatch", + "got", wtx.tx.Hash(), + "expected", types.Tx(tx).Key(), + ) + + if txmp.recheckCursor == txmp.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. + txmp.recheckCursor = nil + return + } + + txmp.recheckCursor = txmp.recheckCursor.Next() + wtx = txmp.recheckCursor.Value.(*WrappedTx) + } + + // Only evaluate transactions that have not been removed. This can happen + // if an existing transaction is evicted during CheckTx and while this + // callback is being executed for the same evicted transaction. 
+ if !txmp.txStore.IsTxRemoved(wtx.hash) { + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(tx, checkTxRes.CheckTx) + } + + if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { + wtx.priority = checkTxRes.CheckTx.Priority + } else { + txmp.logger.Debug( + "existing transaction no longer valid; failed re-CheckTx callback", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err, + "code", checkTxRes.CheckTx.Code, + ) + + if wtx.gossipEl != txmp.recheckCursor { + panic("corrupted reCheckTx cursor") + } + + txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) + } + } + + // move reCheckTx cursor to next element + if txmp.recheckCursor == txmp.recheckEnd { + txmp.recheckCursor = nil + } else { + txmp.recheckCursor = txmp.recheckCursor.Next() + } + + if txmp.recheckCursor == nil { + txmp.logger.Debug("finished rechecking transactions") + + if txmp.Size() > 0 { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) +} + +// updateReCheckTxs updates the recheck cursors using the gossipIndex. For +// each transaction, it executes CheckTxAsync. The global callback defined on +// the proxyAppConn will be executed for each transaction after CheckTx is +// executed. +// +// NOTE: +// - The caller must have a write-lock when executing updateReCheckTxs. +func (txmp *TxMempool) updateReCheckTxs(ctx context.Context) { + if txmp.Size() == 0 { + panic("attempted to update re-CheckTx txs when mempool is empty") + } + + txmp.recheckCursor = txmp.gossipIndex.Front() + txmp.recheckEnd = txmp.gossipIndex.Back() + + for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { + wtx := e.Value.(*WrappedTx) + + // Only execute CheckTx if the transaction is not marked as removed which + // could happen if the transaction was evicted. 
+ if !txmp.txStore.IsTxRemoved(wtx.hash) { + _, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ + Tx: wtx.tx, + Type: abci.CheckTxType_Recheck, + }) + if err != nil { + // no need in retrying since the tx will be rechecked after the next block + txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) + } + } + } + + if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil { + txmp.logger.Error("failed to flush transactions during rechecking", "err", err) + } +} + +// canAddTx returns an error if we cannot insert the provided *WrappedTx into +// the mempool due to mempool configured constraints. If it returns nil, +// the transaction can be inserted into the mempool. +func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { + var ( + numTxs = txmp.Size() + sizeBytes = txmp.SizeBytes() + ) + + if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { + return types.ErrMempoolIsFull{ + NumTxs: numTxs, + MaxTxs: txmp.config.Size, + TxsBytes: sizeBytes, + MaxTxsBytes: txmp.config.MaxTxsBytes, + } + } + + return nil +} + +func (txmp *TxMempool) insertTx(wtx *WrappedTx) { + txmp.txStore.SetTx(wtx) + txmp.priorityIndex.PushTx(wtx) + txmp.heightIndex.Insert(wtx) + txmp.timestampIndex.Insert(wtx) + + // Insert the transaction into the gossip index and mark the reference to the + // linked-list element, which will be needed at a later point when the + // transaction is removed. + gossipEl := txmp.gossipIndex.PushBack(wtx) + wtx.gossipEl = gossipEl + + atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) +} + +func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { + if txmp.txStore.IsTxRemoved(wtx.hash) { + return + } + + txmp.txStore.RemoveTx(wtx) + txmp.priorityIndex.RemoveTx(wtx) + txmp.heightIndex.Remove(wtx) + txmp.timestampIndex.Remove(wtx) + + // Remove the transaction from the gossip index and cleanup the linked-list + // element so it can be garbage collected. 
+ txmp.gossipIndex.Remove(wtx.gossipEl) + wtx.gossipEl.DetachPrev() + + atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) + + if removeFromCache { + txmp.cache.Remove(wtx.tx) + } +} + +// purgeExpiredTxs removes all transactions that have exceeded their respective +// height- and/or time-based TTLs from their respective indexes. Every expired +// transaction will be removed from the mempool, but preserved in the cache. +// +// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which +// the caller has a write-lock on the mempool and so we can safely iterate over +// the height and time based indexes. +func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { + now := time.Now() + expiredTxs := make(map[types.TxKey]*WrappedTx) + + if txmp.config.TTLNumBlocks > 0 { + purgeIdx := -1 + for i, wtx := range txmp.heightIndex.txs { + if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { + expiredTxs[wtx.tx.Key()] = wtx + purgeIdx = i + } else { + // since the index is sorted, we know no other txs can be be purged + break + } + } + + if purgeIdx >= 0 { + txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] + } + } + + if txmp.config.TTLDuration > 0 { + purgeIdx := -1 + for i, wtx := range txmp.timestampIndex.txs { + if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { + expiredTxs[wtx.tx.Key()] = wtx + purgeIdx = i + } else { + // since the index is sorted, we know no other txs can be be purged + break + } + } + + if purgeIdx >= 0 { + txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] + } + } + + for _, wtx := range expiredTxs { + txmp.removeTx(wtx, false) + } +} + +func (txmp *TxMempool) notifyTxsAvailable() { + if txmp.Size() == 0 { + panic("attempt to notify txs available but mempool is empty!") + } + + if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { + // channel cap is 1, so this will send once + txmp.notifiedTxsAvailable = true + + select { + case txmp.txsAvailable <- struct{}{}: + default: + } } } diff --git 
a/internal/mempool/v1/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go similarity index 72% rename from internal/mempool/v1/mempool_bench_test.go rename to internal/mempool/mempool_bench_test.go index ca23f1479d..82848dbfb2 100644 --- a/internal/mempool/v1/mempool_bench_test.go +++ b/internal/mempool/mempool_bench_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -8,11 +8,13 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/mempool" ) func BenchmarkTxMempool_CheckTx(b *testing.B) { - txmp := setup(b, 10000) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, b, 10000) rng := rand.New(rand.NewSource(time.Now().UnixNano())) b.ResetTimer() @@ -27,6 +29,6 @@ func BenchmarkTxMempool_CheckTx(b *testing.B) { tx := []byte(fmt.Sprintf("%X=%d", prefix, priority)) b.StartTimer() - require.NoError(b, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})) + require.NoError(b, txmp.CheckTx(ctx, tx, nil, TxInfo{})) } } diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/mempool_test.go similarity index 77% rename from internal/mempool/v1/mempool_test.go rename to internal/mempool/mempool_test.go index a7b5bcff0a..07a77fab55 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "bytes" @@ -21,7 +21,6 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -73,30 +72,37 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { } } -func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { +func setup(ctx context.Context, t testing.TB, cacheSize 
int, options ...TxMempoolOption) *TxMempool { t.Helper() + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + app := &application{kvstore.NewApplication()} cc := abciclient.NewLocalCreator(app) + logger := log.TestingLogger() - cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + require.NoError(t, err) cfg.Mempool.CacheSize = cacheSize - - appConnMem, err := cc() + appConnMem, err := cc(logger) require.NoError(t, err) - require.NoError(t, appConnMem.Start()) + require.NoError(t, appConnMem.Start(ctx)) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) - require.NoError(t, appConnMem.Stop()) + cancel() + appConnMem.Wait() }) - return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) + return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) } -func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { +func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { + t.Helper() + txs := make([]testTx, numTxs) - txInfo := mempool.TxInfo{SenderID: peerID} + txInfo := TxInfo{SenderID: peerID} rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -111,14 +117,27 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)), priority: priority, } - require.NoError(t, txmp.CheckTx(context.Background(), txs[i].tx, nil, txInfo)) + require.NoError(t, txmp.CheckTx(ctx, txs[i].tx, nil, txInfo)) } return txs } +func convertTex(in []testTx) types.Txs { + out := make([]types.Tx, len(in)) + + for idx := range in { + out[idx] = in[idx].tx + } + + return out +} + func TestTxMempool_TxsAvailable(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) 
txmp.EnableTxsAvailable() ensureNoTxFire := func() { @@ -144,7 +163,7 @@ func TestTxMempool_TxsAvailable(t *testing.T) { // Execute CheckTx for some transactions and ensure TxsAvailable only fires // once. - txs := checkTxs(t, txmp, 100, 0) + txs := checkTxs(ctx, t, txmp, 100, 0) ensureTxFire() ensureNoTxFire() @@ -160,20 +179,23 @@ func TestTxMempool_TxsAvailable(t *testing.T) { // commit half the transactions and ensure we fire an event txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() ensureTxFire() ensureNoTxFire() // Execute CheckTx for more transactions and ensure we do not fire another // event as we're still on the same height (1). - _ = checkTxs(t, txmp, 100, 0) + _ = checkTxs(ctx, t, txmp, 100, 0) ensureNoTxFire() } func TestTxMempool_Size(t *testing.T) { - txmp := setup(t, 0) - txs := checkTxs(t, txmp, 100, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) + txs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -188,7 +210,7 @@ func TestTxMempool_Size(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() require.Equal(t, len(rawTxs)/2, txmp.Size()) @@ -196,8 +218,11 @@ func TestTxMempool_Size(t *testing.T) { } func TestTxMempool_Flush(t *testing.T) { - txmp := setup(t, 0) - txs := checkTxs(t, txmp, 100, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) + txs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -212,7 +237,7 @@ func TestTxMempool_Flush(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, 
nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() txmp.Flush() @@ -221,8 +246,11 @@ func TestTxMempool_Flush(t *testing.T) { } func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { - txmp := setup(t, 0) - tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) + tTxs := checkTxs(ctx, t, txmp, 100, 0) // all txs request 1 gas unit require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -271,8 +299,11 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { } func TestTxMempool_ReapMaxTxs(t *testing.T) { - txmp := setup(t, 0) - tTxs := checkTxs(t, txmp, 100, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) + tTxs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -320,24 +351,30 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { } func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 0) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes+1) _, err := rng.Read(tx) require.NoError(t, err) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.Error(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: 0})) tx = make([]byte, txmp.config.MaxTxBytes-1) _, err = rng.Read(tx) require.NoError(t, err) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: 0})) } func TestTxMempool_CheckTxSamePeer(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) 
peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -347,12 +384,15 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) { tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: peerID})) + require.Error(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: peerID})) } func TestTxMempool_CheckTxSameSender(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -367,14 +407,17 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50)) tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(ctx, tx1, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) - require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(ctx, tx2, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) } func TestTxMempool_ConcurrentTxs(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 100) rng := rand.New(rand.NewSource(time.Now().UnixNano())) checkTxDone := make(chan struct{}) @@ -383,7 +426,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { wg.Add(1) go func() { for i := 0; i < 20; i++ { - _ = checkTxs(t, txmp, 100, 0) + _ = checkTxs(ctx, t, txmp, 100, 0) dur := rng.Intn(1000-500) + 500 time.Sleep(time.Duration(dur) * time.Millisecond) } @@ -417,7 +460,7 @@ func 
TestTxMempool_ConcurrentTxs(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil)) txmp.Unlock() height++ @@ -438,11 +481,14 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { } func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { - txmp := setup(t, 500) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + txmp := setup(ctx, t, 500) txmp.height = 100 txmp.config.TTLNumBlocks = 10 - tTxs := checkTxs(t, txmp, 100, 0) + tTxs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, 100, txmp.heightIndex.Size()) @@ -454,14 +500,14 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil)) txmp.Unlock() require.Equal(t, 95, txmp.Size()) require.Equal(t, 95, txmp.heightIndex.Size()) // check more txs at height 101 - _ = checkTxs(t, txmp, 50, 1) + _ = checkTxs(ctx, t, txmp, 50, 1) require.Equal(t, 145, txmp.Size()) require.Equal(t, 145, txmp.heightIndex.Size()) @@ -480,7 +526,7 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } txmp.Lock() - require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil)) txmp.Unlock() require.GreaterOrEqual(t, txmp.Size(), 45) @@ -488,6 +534,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } func TestTxMempool_CheckTxPostCheckError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { name string err error @@ -504,10 +553,13 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { for _, tc := range cases { testCase := tc t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := 
context.WithCancel(ctx) + defer cancel() + postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error { return testCase.err } - txmp := setup(t, 0, WithPostCheck(postCheckFn)) + txmp := setup(ctx, t, 0, WithPostCheck(postCheckFn)) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes-1) _, err := rng.Read(tx) @@ -522,7 +574,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { } require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError) } - require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) }) } } diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 8e6f0c7bfb..2b32a7ce66 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -12,7 +12,7 @@ import ( // Mempool is an empty implementation of a Mempool, useful for testing. type Mempool struct{} -var _ mempool.Mempool = Mempool{} +var _ Mempool = Mempool{} func (Mempool) Lock() {} func (Mempool) Unlock() {} @@ -24,6 +24,7 @@ func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (Mempool) Update( + _ context.Context, _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, @@ -32,11 +33,11 @@ func (Mempool) Update( ) error { return nil } -func (Mempool) Flush() {} -func (Mempool) FlushAppConn() error { return nil } -func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (Mempool) EnableTxsAvailable() {} -func (Mempool) SizeBytes() int64 { return 0 } +func (Mempool) Flush() {} +func (Mempool) FlushAppConn(ctx context.Context) error { return nil } +func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (Mempool) EnableTxsAvailable() {} +func (Mempool) 
SizeBytes() int64 { return 0 } func (Mempool) TxsFront() *clist.CElement { return nil } func (Mempool) TxsWaitChan() <-chan struct{} { return nil } diff --git a/internal/mempool/v1/priority_queue.go b/internal/mempool/priority_queue.go similarity index 97% rename from internal/mempool/v1/priority_queue.go rename to internal/mempool/priority_queue.go index df74a92d3c..e31997397e 100644 --- a/internal/mempool/v1/priority_queue.go +++ b/internal/mempool/priority_queue.go @@ -1,17 +1,16 @@ -package v1 +package mempool import ( "container/heap" "sort" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "sync" ) var _ heap.Interface = (*TxPriorityQueue)(nil) // TxPriorityQueue defines a thread-safe priority queue for valid transactions. type TxPriorityQueue struct { - mtx tmsync.RWMutex + mtx sync.RWMutex txs []*WrappedTx } diff --git a/internal/mempool/v1/priority_queue_test.go b/internal/mempool/priority_queue_test.go similarity index 99% rename from internal/mempool/v1/priority_queue_test.go rename to internal/mempool/priority_queue_test.go index c0048f388f..ddc84806da 100644 --- a/internal/mempool/v1/priority_queue_test.go +++ b/internal/mempool/priority_queue_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "math/rand" diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/reactor.go similarity index 74% rename from internal/mempool/v1/reactor.go rename to internal/mempool/reactor.go index 8ef5a6bd89..7ef80aecd1 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/reactor.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -11,7 +11,6 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ 
-36,10 +35,11 @@ type PeerManager interface { // txs to the peers you received it from. type Reactor struct { service.BaseService + logger log.Logger cfg *config.MempoolConfig mempool *TxMempool - ids *mempool.IDs + ids *IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. @@ -48,7 +48,6 @@ type Reactor struct { mempoolCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} // peerWG is used to coordinate graceful termination of all peer broadcasting // goroutines. @@ -58,41 +57,47 @@ type Reactor struct { // Reactor. observePanic is called with the recovered value. observePanic func(interface{}) - mtx tmsync.Mutex + mtx sync.Mutex peerRoutines map[types.NodeID]*tmsync.Closer } // NewReactor returns a reference to a new reactor. func NewReactor( + ctx context.Context, logger log.Logger, cfg *config.MempoolConfig, peerMgr PeerManager, txmp *TxMempool, - mempoolCh *p2p.Channel, + chCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, -) *Reactor { +) (*Reactor, error) { + + ch, err := chCreator(ctx, getChannelDescriptor(cfg)) + if err != nil { + return nil, err + } r := &Reactor{ + logger: logger, cfg: cfg, peerMgr: peerMgr, mempool: txmp, - ids: mempool.NewMempoolIDs(), - mempoolCh: mempoolCh, + ids: NewMempoolIDs(), + mempoolCh: ch, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), peerRoutines: make(map[types.NodeID]*tmsync.Closer), observePanic: defaultObservePanic, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) - return r + return r, nil } func defaultObservePanic(r interface{}) {} -// GetChannelDescriptor produces an instance of a descriptor for this +// getChannelDescriptor produces an instance of a descriptor for this // package's required channels. 
-func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { +func getChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -101,7 +106,7 @@ func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { } return &p2p.ChannelDescriptor{ - ID: mempool.MempoolChannel, + ID: MempoolChannel, MessageType: new(protomem.Message), Priority: 5, RecvMessageCapacity: batchMsg.Size(), @@ -113,13 +118,13 @@ func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { if !r.cfg.Broadcast { - r.Logger.Info("tx broadcasting is disabled") + r.logger.Info("tx broadcasting is disabled") } - go r.processMempoolCh() - go r.processPeerUpdates() + go r.processMempoolCh(ctx) + go r.processPeerUpdates(ctx) return nil } @@ -135,24 +140,14 @@ func (r *Reactor) OnStop() { // wait for all spawned peer tx broadcasting goroutines to gracefully exit r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.mempoolCh.Done() - <-r.peerUpdates.Done() } // handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. // For every tx in the message, we execute CheckTx. It returns an error if an // empty set of txs are sent in an envelope or if we receive an unexpected // message type. 
-func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope *p2p.Envelope) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *protomem.Txs: @@ -161,13 +156,13 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { return errors.New("empty txs received from peer") } - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} + txInfo := TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} if len(envelope.From) != 0 { txInfo.SenderNodeID = envelope.From } for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { + if err := r.mempool.CheckTx(ctx, types.Tx(tx), nil, txInfo); err != nil { logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -182,12 +177,12 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { r.observePanic(e) err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -195,11 +190,11 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - r.Logger.Debug("received message", "peer", envelope.From) + r.logger.Debug("received message", "peer", envelope.From) switch chID { - case mempool.MempoolChannel: - err = r.handleMempoolMessage(envelope) + case MempoolChannel: + err = r.handleMempoolMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%T)", chID, envelope.Message) @@ -210,23 +205,18 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // processMempoolCh implements a blocking event loop where we listen for p2p // Envelope messages from the mempoolCh. 
-func (r *Reactor) processMempoolCh() { - defer r.mempoolCh.Close() - - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processMempoolCh(ctx context.Context) { + iter := r.mempoolCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, r.mempoolCh.ID, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) + if serr := r.mempoolCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on mempool channel; closing...") - return } } } @@ -236,8 +226,8 @@ func (r *Reactor) processMempoolCh() { // goroutine or not. If not, we start one for the newly added peer. For down or // removed peers, we remove the peer from the mempool peer ID set and signal to // stop the tx broadcasting goroutine. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -246,8 +236,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: // Do not allow starting new tx broadcast loops after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. + // peer broadcast, but the router still sends in-flight peer updates. 
if !r.IsRunning() { return } @@ -267,7 +256,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.ids.ReserveForPeer(peerUpdate.NodeID) // start a broadcast routine ensuring all txs are forwarded to the peer - go r.broadcastTxRoutine(peerUpdate.NodeID, closer) + go r.broadcastTxRoutine(ctx, peerUpdate.NodeID, closer) } } @@ -288,22 +277,18 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-r.peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate) } } } -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, closer *tmsync.Closer) { peerMempoolID := r.ids.GetForPeer(peerID) var nextGossipTx *clist.CElement @@ -317,7 +302,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) if e := recover(); e != nil { r.observePanic(e) - r.Logger.Error( + r.logger.Error( "recovering from broadcasting mempool loop", "err", e, "stack", string(debug.Stack()), @@ -326,7 +311,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) }() for { - if !r.IsRunning() { + if !r.IsRunning() || ctx.Err() != nil { return } @@ -335,6 +320,8 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) // start from the beginning. 
if nextGossipTx == nil { select { + case <-ctx.Done(): + return case <-r.mempool.WaitForNextTx(): // wait until a tx is available if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil { continue @@ -344,11 +331,6 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) // The peer is marked for removal via a PeerUpdate as the doneCh was // explicitly closed to signal we should exit. return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return } } @@ -358,7 +340,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) height := r.peerMgr.GetHeight(peerID) if height > 0 && height < memTx.height-1 { // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) + time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) continue } } @@ -368,13 +350,16 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { // Send the mempool tx to the corresponding peer. Note, the peer may be // behind and thus would not be able to process the mempool tx correctly. - r.mempoolCh.Out <- p2p.Envelope{ + if err := r.mempoolCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &protomem.Txs{ Txs: [][]byte{memTx.tx}, }, + }); err != nil { + return } - r.Logger.Debug( + + r.logger.Debug( "gossiped tx to peer", "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, @@ -384,15 +369,11 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) select { case <-nextGossipTx.NextWaitChan(): nextGossipTx = nextGossipTx.Next() - case <-closer.Done(): // The peer is marked for removal via a PeerUpdate as the doneCh was // explicitly closed to signal we should exit. 
return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. + case <-ctx.Done(): return } } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go new file mode 100644 index 0000000000..d99b27edb6 --- /dev/null +++ b/internal/mempool/reactor_test.go @@ -0,0 +1,411 @@ +package mempool + +import ( + "context" + "os" + "runtime" + "strings" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" + "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +type reactorTestSuite struct { + network *p2ptest.Network + logger log.Logger + + reactors map[types.NodeID]*Reactor + mempoolChannels map[types.NodeID]*p2p.Channel + mempools map[types.NodeID]*TxMempool + kvstores map[types.NodeID]*kvstore.Application + + peerChans map[types.NodeID]chan p2p.PeerUpdate + peerUpdates map[types.NodeID]*p2p.PeerUpdates + + nodes []types.NodeID +} + +func setupReactors(ctx context.Context, t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { + t.Helper() + + cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) + + rts := &reactorTestSuite{ + logger: log.NewNopLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), + reactors: make(map[types.NodeID]*Reactor, numNodes), + mempoolChannels: 
make(map[types.NodeID]*p2p.Channel, numNodes), + mempools: make(map[types.NodeID]*TxMempool, numNodes), + kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + } + + chDesc := getChannelDescriptor(cfg.Mempool) + rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc) + + for nodeID := range rts.network.Nodes { + rts.kvstores[nodeID] = kvstore.NewApplication() + + mempool := setup(ctx, t, 0) + rts.mempools[nodeID] = mempool + + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf) + rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) + rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) + + chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.mempoolChannels[nodeID], nil + } + + rts.reactors[nodeID], err = NewReactor( + ctx, + rts.logger.With("nodeID", nodeID), + cfg.Mempool, + rts.network.Nodes[nodeID].PeerManager, + mempool, + chCreator, + rts.peerUpdates[nodeID], + ) + + require.NoError(t, err) + rts.nodes = append(rts.nodes, nodeID) + + require.NoError(t, rts.reactors[nodeID].Start(ctx)) + require.True(t, rts.reactors[nodeID].IsRunning()) + } + + require.Len(t, rts.reactors, numNodes) + + t.Cleanup(func() { + for nodeID := range rts.reactors { + if rts.reactors[nodeID].IsRunning() { + require.NoError(t, rts.reactors[nodeID].Stop()) + rts.reactors[nodeID].Wait() + require.False(t, rts.reactors[nodeID].IsRunning()) + } + + } + }) + + t.Cleanup(leaktest.Check(t)) + + return rts +} + +func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { + t.Helper() + rts.network.Start(ctx, t) + + require.Len(t, + rts.network.RandomNode().PeerManager.Peers(), + len(rts.nodes)-1, + "network does not have expected number of nodes") +} + +func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, 
ids ...types.NodeID) { + t.Helper() + + // ensure that the transactions get fully broadcast to the + // rest of the network + wg := &sync.WaitGroup{} + for name, pool := range rts.mempools { + if !p2ptest.NodeInSlice(name, ids) { + continue + } + if len(txs) == pool.Size() { + continue + } + + wg.Add(1) + go func(pool *TxMempool) { + defer wg.Done() + require.Eventually(t, func() bool { return len(txs) == pool.Size() }, + time.Minute, + 250*time.Millisecond, + "ntx=%d, size=%d", len(txs), pool.Size(), + ) + }(pool) + } + wg.Wait() +} + +func TestReactorBroadcastDoesNotPanic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numNodes = 2 + rts := setupReactors(ctx, t, numNodes, 0) + + observePanic := func(r interface{}) { + t.Fatal("panic detected in reactor") + } + + primary := rts.nodes[0] + secondary := rts.nodes[1] + primaryReactor := rts.reactors[primary] + primaryMempool := primaryReactor.mempool + secondaryReactor := rts.reactors[secondary] + + primaryReactor.observePanic = observePanic + secondaryReactor.observePanic = observePanic + + firstTx := &WrappedTx{} + primaryMempool.insertTx(firstTx) + + // run the router + rts.start(ctx, t) + + closer := tmsync.NewCloser() + primaryReactor.peerWG.Add(1) + go primaryReactor.broadcastTxRoutine(ctx, secondary, closer) + + wg := &sync.WaitGroup{} + for i := 0; i < 50; i++ { + next := &WrappedTx{} + wg.Add(1) + go func() { + defer wg.Done() + primaryMempool.insertTx(next) + }() + } + + err := primaryReactor.Stop() + require.NoError(t, err) + primaryReactor.peerWG.Wait() + wg.Wait() +} + +func TestReactorBroadcastTxs(t *testing.T) { + numTxs := 1000 + numNodes := 10 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, uint(numTxs)) + + primary := rts.nodes[0] + secondaries := rts.nodes[1:] + + txs := checkTxs(ctx, t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) + + require.Equal(t, numTxs, 
rts.reactors[primary].mempool.Size()) + + rts.start(ctx, t) + + // Wait till all secondary suites (reactor) received all mempool txs from the + // primary suite (node). + rts.waitForTxns(t, convertTex(txs), secondaries...) +} + +// regression test for https://github.com/tendermint/tendermint/issues/5408 +func TestReactorConcurrency(t *testing.T) { + numTxs := 10 + numNodes := 2 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(ctx, t) + + var wg sync.WaitGroup + + for i := 0; i < runtime.NumCPU()*2; i++ { + wg.Add(2) + + // 1. submit a bunch of txs + // 2. update the whole mempool + + txs := checkTxs(ctx, t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) + go func() { + defer wg.Done() + + mempool := rts.mempools[primary] + + mempool.Lock() + defer mempool.Unlock() + + deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) + for i := range txs { + deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} + } + + require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil)) + }() + + // 1. submit a bunch of txs + // 2. 
update none + _ = checkTxs(ctx, t, rts.reactors[secondary].mempool, numTxs, UnknownPeerID) + go func() { + defer wg.Done() + + mempool := rts.mempools[secondary] + + mempool.Lock() + defer mempool.Unlock() + + err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) + require.NoError(t, err) + }() + } + + wg.Wait() +} + +func TestReactorNoBroadcastToSender(t *testing.T) { + numTxs := 1000 + numNodes := 2 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, uint(numTxs)) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + peerID := uint16(1) + _ = checkTxs(ctx, t, rts.mempools[primary], numTxs, peerID) + + rts.start(ctx, t) + + time.Sleep(100 * time.Millisecond) + + require.Eventually(t, func() bool { + return rts.mempools[secondary].Size() == 0 + }, time.Minute, 100*time.Millisecond) +} + +func TestReactor_MaxTxBytes(t *testing.T) { + numNodes := 2 + cfg := config.TestConfig() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, numNodes, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + // Broadcast a tx, which has the max size and ensure it's received by the + // second reactor. + tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) + err := rts.reactors[primary].mempool.CheckTx( + ctx, + tx1, + nil, + TxInfo{ + SenderID: UnknownPeerID, + }, + ) + require.NoError(t, err) + + rts.start(ctx, t) + + rts.reactors[primary].mempool.Flush() + rts.reactors[secondary].mempool.Flush() + + // broadcast a tx, which is beyond the max size and ensure it's not sent + tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) + err = rts.mempools[primary].CheckTx(ctx, tx2, nil, TxInfo{SenderID: UnknownPeerID}) + require.Error(t, err) +} + +func TestDontExhaustMaxActiveIDs(t *testing.T) { + // we're creating a single node network, but not starting the + // network. 
+ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, 1, MaxActiveIDs+1) + + nodeID := rts.nodes[0] + + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + // ensure the reactor does not panic (i.e. exhaust active IDs) + for i := 0; i < MaxActiveIDs+1; i++ { + rts.peerChans[nodeID] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusUp, + NodeID: peerID, + } + + require.NoError(t, rts.mempoolChannels[nodeID].Send(ctx, p2p.Envelope{ + To: peerID, + Message: &protomem.Txs{ + Txs: [][]byte{}, + }, + })) + } +} + +func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + // 0 is already reserved for UnknownPeerID + ids := NewMempoolIDs() + + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + for i := 0; i < MaxActiveIDs-1; i++ { + ids.ReserveForPeer(peerID) + } + + require.Panics(t, func() { + ids.ReserveForPeer(peerID) + }) +} + +func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setupReactors(ctx, t, 2, 2) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(ctx, t) + + // disconnect peer + rts.peerChans[primary] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusDown, + NodeID: secondary, + } + time.Sleep(500 * time.Millisecond) + + txs := checkTxs(ctx, t, rts.reactors[primary].mempool, 4, UnknownPeerID) + require.Equal(t, 4, len(txs)) + require.Equal(t, 4, rts.mempools[primary].Size()) + require.Equal(t, 0, rts.mempools[secondary].Size()) +} diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index adafdf85e3..c7113c9513 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,6 +1,11 @@ package mempool import ( + "sort" + "sync" + 
"time" + + "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/types" ) @@ -15,3 +20,274 @@ type TxInfo struct { // SenderNodeID is the actual types.NodeID of the sender. SenderNodeID types.NodeID } + +// WrappedTx defines a wrapper around a raw transaction with additional metadata +// that is used for indexing. +type WrappedTx struct { + // tx represents the raw binary transaction data + tx types.Tx + + // hash defines the transaction hash and the primary key used in the mempool + hash types.TxKey + + // height defines the height at which the transaction was validated at + height int64 + + // gasWanted defines the amount of gas the transaction sender requires + gasWanted int64 + + // priority defines the transaction's priority as specified by the application + // in the ResponseCheckTx response. + priority int64 + + // sender defines the transaction's sender as specified by the application in + // the ResponseCheckTx response. + sender string + + // timestamp is the time at which the node first received the transaction from + // a peer. It is used as a second dimension is prioritizing transactions when + // two transactions have the same priority. + timestamp time.Time + + // peers records a mapping of all peers that sent a given transaction + peers map[uint16]struct{} + + // heapIndex defines the index of the item in the heap + heapIndex int + + // gossipEl references the linked-list element in the gossip index + gossipEl *clist.CElement + + // removed marks the transaction as removed from the mempool. This is set + // during RemoveTx and is needed due to the fact that a given existing + // transaction in the mempool can be evicted when it is simultaneously having + // a reCheckTx callback executed. + removed bool +} + +func (wtx *WrappedTx) Size() int { + return len(wtx.tx) +} + +// TxStore implements a thread-safe mapping of valid transaction(s). +// +// NOTE: +// - Concurrent read-only access to a *WrappedTx object is OK. 
However, mutative +// access is not allowed. Regardless, it is not expected for the mempool to +// need mutative access. +type TxStore struct { + mtx sync.RWMutex + hashTxs map[types.TxKey]*WrappedTx // primary index + senderTxs map[string]*WrappedTx // sender is defined by the ABCI application +} + +func NewTxStore() *TxStore { + return &TxStore{ + senderTxs: make(map[string]*WrappedTx), + hashTxs: make(map[types.TxKey]*WrappedTx), + } +} + +// Size returns the total number of transactions in the store. +func (txs *TxStore) Size() int { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return len(txs.hashTxs) +} + +// GetAllTxs returns all the transactions currently in the store. +func (txs *TxStore) GetAllTxs() []*WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wTxs := make([]*WrappedTx, len(txs.hashTxs)) + i := 0 + for _, wtx := range txs.hashTxs { + wTxs[i] = wtx + i++ + } + + return wTxs +} + +// GetTxBySender returns a *WrappedTx by the transaction's sender property +// defined by the ABCI application. +func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return txs.senderTxs[sender] +} + +// GetTxByHash returns a *WrappedTx by the transaction's hash. +func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return txs.hashTxs[hash] +} + +// IsTxRemoved returns true if a transaction by hash is marked as removed and +// false otherwise. +func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx, ok := txs.hashTxs[hash] + if ok { + return wtx.removed + } + + return false +} + +// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a +// non-empty sender, we additionally store the transaction by the sender as +// defined by the ABCI application. 
+func (txs *TxStore) SetTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + txs.senderTxs[wtx.sender] = wtx + } + + txs.hashTxs[wtx.tx.Key()] = wtx +} + +// RemoveTx removes a *WrappedTx from the transaction store. It deletes all +// indexes of the transaction. +func (txs *TxStore) RemoveTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + delete(txs.senderTxs, wtx.sender) + } + + delete(txs.hashTxs, wtx.tx.Key()) + wtx.removed = true +} + +// TxHasPeer returns true if a transaction by hash has a given peer ID and false +// otherwise. If the transaction does not exist, false is returned. +func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return false + } + + _, ok := wtx.peers[peerID] + return ok +} + +// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the +// given peerID to the WrappedTx's set of peers that sent us this transaction. +// We return true if we've already recorded the given peer for this transaction +// and false otherwise. If the transaction does not exist by hash, we return +// (nil, false). +func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return nil, false + } + + if wtx.peers == nil { + wtx.peers = make(map[uint16]struct{}) + } + + if _, ok := wtx.peers[peerID]; ok { + return wtx, true + } + + wtx.peers[peerID] = struct{}{} + return wtx, false +} + +// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be +// used to build generic transaction indexes in the mempool. It accepts a +// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx +// references which is used during Insert in order to determine sorted order. If +// less returns true, a <= b. 
+type WrappedTxList struct { + mtx sync.RWMutex + txs []*WrappedTx + less func(*WrappedTx, *WrappedTx) bool +} + +func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { + return &WrappedTxList{ + txs: make([]*WrappedTx, 0), + less: less, + } +} + +// Size returns the number of WrappedTx objects in the list. +func (wtl *WrappedTxList) Size() int { + wtl.mtx.RLock() + defer wtl.mtx.RUnlock() + + return len(wtl.txs) +} + +// Reset resets the list of transactions to an empty list. +func (wtl *WrappedTxList) Reset() { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + wtl.txs = make([]*WrappedTx, 0) +} + +// Insert inserts a WrappedTx reference into the sorted list based on the list's +// comparator function. +func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + if i == len(wtl.txs) { + // insert at the end + wtl.txs = append(wtl.txs, wtx) + return + } + + // Make space for the inserted element by shifting values at the insertion + // index up one index. + // + // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). + wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) + wtl.txs[i] = wtx +} + +// Remove attempts to remove a WrappedTx from the sorted list. +func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + // Since the list is sorted, we evaluate all elements starting at i. Note, if + // the element does not exist, we may potentially evaluate the entire remainder + // of the list. However, a caller should not be expected to call Remove with a + // non-existing element. + for i < len(wtl.txs) { + if wtl.txs[i] == wtx { + wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) 
+ return + } + + i++ + } +} diff --git a/internal/mempool/v1/tx_test.go b/internal/mempool/tx_test.go similarity index 99% rename from internal/mempool/v1/tx_test.go rename to internal/mempool/tx_test.go index fb4beafab8..b682460761 100644 --- a/internal/mempool/v1/tx_test.go +++ b/internal/mempool/tx_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "fmt" diff --git a/internal/mempool/types.go b/internal/mempool/types.go new file mode 100644 index 0000000000..05d4ba3e3e --- /dev/null +++ b/internal/mempool/types.go @@ -0,0 +1,144 @@ +package mempool + +import ( + "context" + "fmt" + "math" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/types" +) + +const ( + MempoolChannel = p2p.ChannelID(0x30) + + // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + PeerCatchupSleepIntervalMS = 100 + + // UnknownPeerID is the peer ID to use when running CheckTx when there is + // no peer (e.g. RPC) + UnknownPeerID uint16 = 0 + + MaxActiveIDs = math.MaxUint16 +) + +// Mempool defines the mempool interface. +// +// Updates to the mempool need to be synchronized with committing a block so +// applications can reset their transient state on Commit. +type Mempool interface { + // CheckTx executes a new transaction against the application to determine + // its validity and whether it should be added to the mempool. + CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error + + // RemoveTxByKey removes a transaction, identified by its key, + // from the mempool. + RemoveTxByKey(txKey types.TxKey) error + + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes + // bytes total with the condition that the total gasWanted must be less than + // maxGas. + // + // If both maxes are negative, there is no cap on the size of all returned + // transactions (~ all available transactions). 
+ ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs + + // ReapMaxTxs reaps up to max transactions from the mempool. If max is + // negative, there is no cap on the size of all returned transactions + // (~ all available transactions). + ReapMaxTxs(max int) types.Txs + + // Lock locks the mempool. The consensus must be able to hold lock to safely + // update. + Lock() + + // Unlock unlocks the mempool. + Unlock() + + // Update informs the mempool that the given txs were committed and can be + // discarded. + // + // NOTE: + // 1. This should be called *after* block is committed by consensus. + // 2. Lock/Unlock must be managed by the caller. + Update( + ctx context.Context, + blockHeight int64, + blockTxs types.Txs, + deliverTxResponses []*abci.ResponseDeliverTx, + newPreFn PreCheckFunc, + newPostFn PostCheckFunc, + ) error + + // FlushAppConn flushes the mempool connection to ensure async callback calls + // are done, e.g. from CheckTx. + // + // NOTE: + // 1. Lock/Unlock must be managed by caller. + FlushAppConn(context.Context) error + + // Flush removes all transactions from the mempool and caches. + Flush() + + // TxsAvailable returns a channel which fires once for every height, and only + // when transactions are available in the mempool. + // + // NOTE: + // 1. The returned channel may be nil if EnableTxsAvailable was not called. + TxsAvailable() <-chan struct{} + + // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will + // trigger once every height when transactions are available. + EnableTxsAvailable() + + // Size returns the number of transactions in the mempool. + Size() int + + // SizeBytes returns the total size of all txs in the mempool. + SizeBytes() int64 +} + +// PreCheckFunc is an optional filter executed before CheckTx and rejects +// transaction if false is returned. An example would be to ensure that a +// transaction doesn't exceeded the block size. 
+type PreCheckFunc func(types.Tx) error + +// PostCheckFunc is an optional filter executed after CheckTx and rejects +// transaction if false is returned. An example would be to ensure a +// transaction doesn't require more gas than available for the block. +type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error + +// PreCheckMaxBytes checks that the size of the transaction is smaller or equal +// to the expected maxBytes. +func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { + return func(tx types.Tx) error { + txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) + + if txSize > maxBytes { + return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) + } + + return nil + } +} + +// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed +// maxGas. Returns nil if maxGas is -1. +func PostCheckMaxGas(maxGas int64) PostCheckFunc { + return func(tx types.Tx, res *abci.ResponseCheckTx) error { + if maxGas == -1 { + return nil + } + if res.GasWanted < 0 { + return fmt.Errorf("gas wanted %d is negative", + res.GasWanted) + } + if res.GasWanted > maxGas { + return fmt.Errorf("gas wanted %d is greater than max gas %d", + res.GasWanted, maxGas) + } + + return nil + } +} diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go deleted file mode 100644 index acfaec2835..0000000000 --- a/internal/mempool/v0/bench_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package v0 - -import ( - "context" - "encoding/binary" - "sync/atomic" - "testing" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/internal/mempool" -) - -func BenchmarkReap(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - mp.config.Size = 100000 - - size := 10000 - for i := 0; i < size; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, 
uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - mp.ReapMaxBytesMaxGas(100000000, 10000000) - } -} - -func BenchmarkCheckTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - mp.config.Size = 1000000 - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - b.StartTimer() - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkParallelCheckTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - mp.config.Size = 100000000 - - var txcnt uint64 - next := func() uint64 { - return atomic.AddUint64(&txcnt, 1) - 1 - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, next()) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - }) -} - -func BenchmarkCheckDuplicateTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - mp.config.Size = 1000000 - - for i := 0; i < b.N; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err == nil { - b.Fatal("tx should be duplicate") - } - } -} diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go deleted file mode 100644 index 5bf2c7603c..0000000000 --- a/internal/mempool/v0/cache_test.go +++ /dev/null @@ -1,82 +0,0 
@@ -package v0 - -import ( - "context" - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" -) - -func TestCacheAfterUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // reAddIndices & txsInCache can have elements > numTxsToCreate - // also assumes max index is 255 for convenience - // txs in cache also checks order of elements - tests := []struct { - numTxsToCreate int - updateIndices []int - reAddIndices []int - txsInCache []int - }{ - {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works - {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache - {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache - {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe - } - for tcIndex, tc := range tests { - for i := 0; i < tc.numTxsToCreate; i++ { - tx := types.Tx{byte(i)} - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - updateTxs := []types.Tx{} - for _, v := range tc.updateIndices { - tx := types.Tx{byte(v)} - updateTxs = append(updateTxs, tx) - } - err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - - for _, v := range tc.reAddIndices { - tx := types.Tx{byte(v)} - _ = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - } - - cache := mp.cache.(*mempool.LRUTxCache) - node := cache.GetList().Front() - counter := 0 - for node != nil { - require.NotEqual(t, len(tc.txsInCache), counter, - "cache larger than expected on testcase %d", tcIndex) - - nodeVal := 
node.Value.(types.TxKey) - expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) - // Reference for reading the errors: - // >>> sha256('\x00').hexdigest() - // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' - // >>> sha256('\x01').hexdigest() - // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' - // >>> sha256('\x02').hexdigest() - // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - - require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) - counter++ - node = node.Next() - } - require.Equal(t, len(tc.txsInCache), counter, - "cache smaller than expected on testcase %d", tcIndex) - mp.Flush() - } -} diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go deleted file mode 100644 index 2d71c54371..0000000000 --- a/internal/mempool/v0/clist_mempool.go +++ /dev/null @@ -1,675 +0,0 @@ -package v0 - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" -) - -// CListMempool is an ordered in-memory pool for transactions before they are -// proposed in a consensus round. Transaction validity is checked using the -// CheckTx abci message before the transaction is added to the pool. The -// mempool uses a concurrent list structure for storing transactions that can -// be efficiently accessed by multiple concurrent readers. 
-type CListMempool struct { - // Atomic integers - height int64 // the last block Update()'d to - txsBytes int64 // total size of mempool, in bytes - - // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool - txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - - config *config.MempoolConfig - - // Exclusive mutex for Update method to prevent concurrent execution of - // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods. - updateMtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc - - txs *clist.CList // concurrent linked-list of good txs - proxyAppConn proxy.AppConnMempool - - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated in - // serial (ie. by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // Map for quick access to txs to record sender in CheckTx. - // txsMap: txKey -> CElement - txsMap sync.Map - - // Keep a cache of already-seen txs. - // This reduces the pressure on the proxyApp. - cache mempool.TxCache - - logger log.Logger - metrics *mempool.Metrics -} - -var _ mempool.Mempool = &CListMempool{} - -// CListMempoolOption sets an optional parameter on the mempool. -type CListMempoolOption func(*CListMempool) - -// NewCListMempool returns a new mempool with the given configuration and -// connection to an application. 
-func NewCListMempool( - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...CListMempoolOption, -) *CListMempool { - - mp := &CListMempool{ - config: cfg, - proxyAppConn: proxyAppConn, - txs: clist.New(), - height: height, - recheckCursor: nil, - recheckEnd: nil, - logger: log.NewNopLogger(), - metrics: mempool.NopMetrics(), - } - - if cfg.CacheSize > 0 { - mp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } else { - mp.cache = mempool.NopTxCache{} - } - - proxyAppConn.SetResponseCallback(mp.globalCb) - - for _, option := range options { - option(mp) - } - - return mp -} - -// NOTE: not thread safe - should only be called once, on startup -func (mem *CListMempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan struct{}, 1) -} - -// SetLogger sets the Logger. -func (mem *CListMempool) SetLogger(l log.Logger) { - mem.logger = l -} - -// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran before CheckTx. Only applies to the first created block. -// After that, Update overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption { - return func(mem *CListMempool) { mem.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran after CheckTx. Only applies to the first created block. -// After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption { - return func(mem *CListMempool) { mem.postCheck = f } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *mempool.Metrics) CListMempoolOption { - return func(mem *CListMempool) { mem.metrics = metrics } -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Lock() { - mem.updateMtx.Lock() -} - -// Safe for concurrent use by multiple goroutines. 
-func (mem *CListMempool) Unlock() { - mem.updateMtx.Unlock() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Size() int { - return mem.txs.Len() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) SizeBytes() int64 { - return atomic.LoadInt64(&mem.txsBytes) -} - -// Lock() must be help by the caller during execution. -func (mem *CListMempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync(context.Background()) -} - -// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state. -func (mem *CListMempool) Flush() { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - _ = atomic.SwapInt64(&mem.txsBytes, 0) - mem.cache.Reset() - - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } - - mem.txsMap.Range(func(key, _ interface{}) bool { - mem.txsMap.Delete(key) - return true - }) -} - -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -// FIXME: leaking implementation details! -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - -// It blocks if we're waiting on Update() or Reap(). -// cb: A callback from the CheckTx command. -// It gets called from another goroutine. -// CONTRACT: Either cb will get called, or err returned. -// -// Safe for concurrent use by multiple goroutines. 
-func (mem *CListMempool) CheckTx( - ctx context.Context, - tx types.Tx, - cb func(*abci.Response), - txInfo mempool.TxInfo, -) error { - - mem.updateMtx.RLock() - // use defer to unlock mutex because application (*local client*) might panic - defer mem.updateMtx.RUnlock() - - txSize := len(tx) - - if err := mem.isFull(txSize); err != nil { - return err - } - - if txSize > mem.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: mem.config.MaxTxBytes, - Actual: txSize, - } - } - - if mem.preCheck != nil { - if err := mem.preCheck(tx); err != nil { - return types.ErrPreCheck{ - Reason: err, - } - } - } - - // NOTE: proxyAppConn may error if tx buffer is full - if err := mem.proxyAppConn.Error(); err != nil { - return err - } - - if !mem.cache.Push(tx) { // if the transaction already exists in the cache - // Record a new sender for a tx we've already seen. - // Note it's possible a tx is still in the cache but no longer in the mempool - // (eg. after committing a block, txs are removed from mempool but not cache), - // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true) - // TODO: consider punishing peer for dups, - // its non-trivial since invalid txs can become valid, - // but they can spam the same tx with little cost to them atm. - if loaded { - return types.ErrTxInCache - } - } - - mem.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - mem.cache.Remove(tx) - return err - } - reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderNodeID, cb)) - - return nil -} - -// Global callback that will be called after every ABCI response. 
-// Having a single global callback avoids needing to set a callback for each request. -// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), -// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that -// include this information. If we're not in the midst of a recheck, this function will just return, -// so the request specific callback can do the work. -// -// When rechecking, we don't need the peerID, so the recheck callback happens -// here. -func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - return - } - - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) -} - -// Request specific callback that should be set on individual reqRes objects -// to incorporate local information when processing the response. -// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. -// NOTE: alternatively, we could include this information in the ABCI request itself. -// -// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called -// when all other response processing is complete. -// -// Used in CheckTx to record PeerID who sent us the tx. -func (mem *CListMempool) reqResCb( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - externalCb func(*abci.Response), -) func(res *abci.Response) { - return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") - } - - mem.resCbFirstTime(tx, peerID, peerP2PID, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - // passed in by the caller of CheckTx, eg. 
the RPC - if externalCb != nil { - externalCb(res) - } - } -} - -// Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *CListMempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(memTx.tx.Key(), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) -} - -// Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated -func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(tx.Key()) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - - if removeFromCache { - mem.cache.Remove(tx) - } -} - -// RemoveTxByKey removes a transaction from the mempool by its TxKey index. -func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { - if e, ok := mem.txsMap.Load(txKey); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if memTx != nil { - mem.removeTx(memTx.tx, e.(*clist.CElement), false) - return nil - } - return errors.New("transaction not found") - } - return errors.New("invalid transaction found") -} - -func (mem *CListMempool) isFull(txSize int) error { - var ( - memSize = mem.Size() - txsBytes = mem.SizeBytes() - ) - - if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: memSize, - MaxTxs: mem.config.Size, - TxsBytes: txsBytes, - MaxTxsBytes: mem.config.MaxTxsBytes, - } - } - - return nil -} - -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. 
-func (mem *CListMempool) resCbFirstTime( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - res *abci.Response, -) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Check mempool isn't full again to reduce the chance of exceeding the - // limits. - if err := mem.isFull(len(tx)); err != nil { - // remove from cache (mempool might have a space later) - mem.cache.Remove(tx) - mem.logger.Error(err.Error()) - return - } - - memTx := &mempoolTx{ - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - } - memTx.senders.Store(peerID, true) - mem.addTx(memTx) - mem.logger.Debug( - "added good transaction", - "tx", types.Tx(tx).Hash(), - "res", r, - "height", memTx.height, - "total", mem.Size(), - ) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Debug( - "rejected bad transaction", - "tx", types.Tx(tx).Hash(), - "peerID", peerP2PID, - "res", r, - "err", postCheckErr, - ) - mem.metrics.FailedTxs.Add(1) - - if !mem.config.KeepInvalidTxsInCache { - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - } - - default: - // ignore other messages - } -} - -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. 
-func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(tx, memTx.tx) { - panic(fmt.Sprintf( - "Unexpected tx response from proxy during recheck\nExpected %X, got %X", - memTx.tx, - tx)) - } - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) - // NOTE: we remove tx from the cache because it might be good later - mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! - mem.logger.Debug("done rechecking txs") - - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() - } - } - default: - // ignore other messages - } -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsAvailable() <-chan struct{} { - return mem.txsAvailable -} - -func (mem *CListMempool) notifyTxsAvailable() { - if mem.Size() == 0 { - panic("notified txs available but mempool is empty!") - } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { - // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true - select { - case mem.txsAvailable <- struct{}{}: - default: - } - } -} - -// Safe for concurrent use by multiple goroutines. 
-func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - var ( - totalGas int64 - runningSize int64 - ) - - // TODO: we will get a performance boost if we have a good estimate of avg - // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - - txs = append(txs, memTx.tx) - - dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) - // Check total size requirement - if maxBytes > -1 && runningSize+dataSize > maxBytes { - return txs[:len(txs)-1] - } - - runningSize += dataSize - - // Check total gas requirement. - // If maxGas is negative, skip this check. - // Since newTotalGas < masGas, which - // must be non-negative, it follows that this won't overflow. - newTotalGas := totalGas + memTx.gasWanted - if maxGas > -1 && newTotalGas > maxGas { - return txs[:len(txs)-1] - } - totalGas = newTotalGas - } - return txs -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - if max < 0 { - max = mem.txs.Len() - } - - txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) - } - return txs -} - -// Lock() must be help by the caller during execution. 
-func (mem *CListMempool) Update( - height int64, - txs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - preCheck mempool.PreCheckFunc, - postCheck mempool.PostCheckFunc, -) error { - // Set height - mem.height = height - mem.notifiedTxsAvailable = false - - if preCheck != nil { - mem.preCheck = preCheck - } - if postCheck != nil { - mem.postCheck = postCheck - } - - for i, tx := range txs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { - // Add valid committed tx to the cache (if missing). - _ = mem.cache.Push(tx) - } else if !mem.config.KeepInvalidTxsInCache { - // Allow invalid transactions to be resubmitted. - mem.cache.Remove(tx) - } - - // Remove committed tx from the mempool. - // - // Note an evil proposer can drop valid txs! - // Mempool before: - // 100 -> 101 -> 102 - // Block, proposed by an evil proposer: - // 101 -> 102 - // Mempool after: - // 100 - // https://github.com/tendermint/tendermint/issues/3322. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - mem.removeTx(tx, e.(*clist.CElement), false) - } - } - - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. - if mem.Size() > 0 { - if mem.config.Recheck { - mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height) - mem.recheckTxs() - // At this point, mem.txs are being rechecked. - // mem.recheckCursor re-scans mem.txs and possibly removes some txs. - // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. - } else { - mem.notifyTxsAvailable() - } - } - - // Update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - return nil -} - -func (mem *CListMempool) recheckTxs() { - if mem.Size() == 0 { - panic("recheckTxs is called, but the mempool is empty") - } - - mem.recheckCursor = mem.txs.Front() - mem.recheckEnd = mem.txs.Back() - - ctx := context.Background() - - // Push txs to proxyAppConn - // NOTE: globalCb may be called concurrently. 
- for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - _, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ - Tx: memTx.tx, - Type: abci.CheckTxType_Recheck, - }) - if err != nil { - // No need in retrying since memTx will be rechecked after next block. - mem.logger.Error("Can't check tx", "err", err) - } - } - - _, err := mem.proxyAppConn.FlushAsync(ctx) - if err != nil { - mem.logger.Error("Can't flush txs", "err", err) - } -} - -//-------------------------------------------------------------------------------- - -// mempoolTx is a transaction that successfully ran -type mempoolTx struct { - height int64 // height that this tx had been validated in - gasWanted int64 // amount of gas this tx states it will require - tx types.Tx // - - // ids of peers who've sent us this tx (as a map for quick lookups). - // senders: PeerID -> bool - senders sync.Map -} - -// Height returns the height for this transaction -func (memTx *mempoolTx) Height() int64 { - return atomic.LoadInt64(&memTx.height) -} diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go deleted file mode 100644 index 131e500c03..0000000000 --- a/internal/mempool/v0/clist_mempool_test.go +++ /dev/null @@ -1,620 +0,0 @@ -package v0 - -import ( - "context" - "crypto/rand" - "encoding/binary" - "fmt" - mrand "math/rand" - "os" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abciserver "github.com/tendermint/tendermint/abci/server" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/libs/log" - tmrand 
"github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -// A cleanupFunc cleans up any config / test files created for a particular -// test. -type cleanupFunc func() - -func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc) { - return newMempoolWithAppAndConfig(cc, config.ResetTestRoot("mempool_test")) -} - -func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) { - appConnMem, _ := cc() - appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - err := appConnMem.Start() - if err != nil { - panic(err) - } - - mp := NewCListMempool(cfg.Mempool, appConnMem, 0) - mp.SetLogger(log.TestingLogger()) - - return mp, func() { os.RemoveAll(cfg.RootDir) } -} - -func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - t.Fatal("Expected not to fire") - case <-timer.C: - } -} - -func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - case <-timer.C: - t.Fatal("Expected to fire") - } -} - -func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs { - txs := make(types.Txs, count) - txInfo := mempool.TxInfo{SenderID: peerID} - for i := 0; i < count; i++ { - txBytes := make([]byte, 20) - txs[i] = txBytes - _, err := rand.Read(txBytes) - if err != nil { - t.Error(err) - } - if err := mp.CheckTx(context.Background(), txBytes, nil, txInfo); err != nil { - // Skip invalid txs. - // TestMempoolFilters will fail otherwise. It asserts a number of txs - // returned. 
- if types.IsPreCheckError(err) { - continue - } - t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) - } - } - return txs -} - -func TestReapMaxBytesMaxGas(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // Ensure gas calculation behaves as expected - checkTxs(t, mp, 1, mempool.UnknownPeerID) - tx0 := mp.TxsFront().Value.(*mempoolTx) - // assert that kv store has gas wanted = 1. - require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1") - require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") - // ensure each tx is 20 bytes long - require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") - mp.Flush() - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. - // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - maxBytes int64 - maxGas int64 - expectedNumTxs int - }{ - {20, -1, -1, 20}, - {20, -1, 0, 0}, - {20, -1, 10, 10}, - {20, -1, 30, 20}, - {20, 0, -1, 0}, - {20, 0, 10, 0}, - {20, 10, 10, 0}, - {20, 28, 10, 1}, // account for overhead in Data{} - {20, 240, 5, 5}, - {20, 280, -1, 10}, - {20, 280, 10, 10}, - {20, 280, 15, 10}, - {20, 20000, -1, 20}, - {20, 20000, 5, 5}, - {20, 20000, 30, 20}, - } - for tcIndex, tt := range tests { - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) - assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", - len(got), tt.expectedNumTxs, tcIndex) - mp.Flush() - } -} - -func TestMempoolFilters(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - emptyTxArr := []types.Tx{[]byte{}} - - nopPreFilter := func(tx types.Tx) error { return nil } - nopPostFilter := func(tx types.Tx, res 
*abci.ResponseCheckTx) error { return nil } - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. - // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - preFilter mempool.PreCheckFunc - postFilter mempool.PostCheckFunc - expectedNumTxs int - }{ - {10, nopPreFilter, nopPostFilter, 10}, - {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, - {10, mempool.PreCheckMaxBytes(28), nopPostFilter, 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, - {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, - {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, - {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, - {10, mempool.PreCheckMaxBytes(28), mempool.PostCheckMaxGas(1), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, - } - for tcIndex, tt := range tests { - err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) - require.NoError(t, err) - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex) - mp.Flush() - } -} - -func TestMempoolUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - // 1. Adds valid txs to the cache - { - err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. 
Removes valid txs from the mempool - { - err := mp.CheckTx(context.Background(), []byte{0x02}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - } - - // 3. Removes invalid transactions from the cache and the mempool (if present) - { - err := mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - - err = mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestMempool_KeepInvalidTxsInCache(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - wcfg := config.DefaultConfig() - wcfg.Mempool.KeepInvalidTxsInCache = true - mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) - defer cleanup() - - // 1. An invalid transaction must remain in the cache after Update - { - a := make([]byte, 8) - binary.BigEndian.PutUint64(a, 0) - - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, 1) - - err := mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{}) - require.NoError(t, err) - - // simulate new block - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: a}) - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: b}) - err = mp.Update(1, []types.Tx{a, b}, - []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil) - require.NoError(t, err) - - // a must be added to the cache - err = mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{}) - require.NoError(t, err) - - // b must remain in the cache - err = mp.CheckTx(context.Background(), b, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. 
An invalid transaction must remain in the cache - { - a := make([]byte, 8) - binary.BigEndian.PutUint64(a, 0) - - // remove a from the cache to test (2) - mp.cache.Remove(a) - - err := mp.CheckTx(context.Background(), a, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestTxsAvailable(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - mp.EnableTxsAvailable() - - timeoutMS := 500 - - // with no txs, it shouldnt fire - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch of txs, it should only fire once - txs := checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // call update with half the txs. - // it should fire once now for the new height - // since there are still txs left - committedTxs, txs := txs[:50], txs[50:] - if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // now call update with all the txs. it should not fire as there are no txs left - committedTxs = append(txs, moreTxs...) 
//nolint: gocritic - if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs, it should only fire once - checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) -} - -func TestSerialReap(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - - mp, cleanup := newMempoolWithApp(cc) - defer cleanup() - - appConnCon, _ := cc() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err := appConnCon.Start() - require.Nil(t, err) - - cacheMap := make(map[string]struct{}) - deliverTxsRange := func(start, end int) { - // Deliver some txs. - for i := start; i < end; i++ { - - // This will succeed - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - _, cached := cacheMap[string(txBytes)] - if cached { - require.NotNil(t, err, "expected error for cached tx") - } else { - require.Nil(t, err, "expected no err for uncached tx") - } - cacheMap[string(txBytes)] = struct{}{} - - // Duplicates are cached and should return error - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NotNil(t, err, "Expected error after CheckTx on duplicated tx") - } - } - - reapCheck := func(exp int) { - txs := mp.ReapMaxBytesMaxGas(-1, -1) - require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) - } - - updateRange := func(start, end int) { - txs := make([]types.Tx, 0) - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - txs = append(txs, txBytes) - } - if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { - 
t.Error(err) - } - } - - commitRange := func(start, end int) { - ctx := context.Background() - // Deliver some txs. - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - if err != nil { - t.Errorf("client error committing tx: %v", err) - } - if res.IsErr() { - t.Errorf("error committing tx. Code:%v result:%X log:%v", - res.Code, res.Data, res.Log) - } - } - res, err := appConnCon.CommitSync(ctx) - if err != nil { - t.Errorf("client error committing: %v", err) - } - if len(res.Data) != 8 { - t.Errorf("error committing. Hash:%X", res.Data) - } - } - - //---------------------------------------- - - // Deliver some txs. - deliverTxsRange(0, 100) - - // Reap the txs. - reapCheck(100) - - // Reap again. We should get the same amount - reapCheck(100) - - // Deliver 0 to 999, we should reap 900 new txs - // because 100 were already counted. - deliverTxsRange(0, 1000) - - // Reap the txs. - reapCheck(1000) - - // Reap again. We should get the same amount - reapCheck(1000) - - // Commit from the conensus AppConn - commitRange(0, 500) - updateRange(0, 500) - - // We should have 500 left. - reapCheck(500) - - // Deliver 100 invalid txs and 100 valid txs - deliverTxsRange(900, 1100) - - // We should have 600 now. - reapCheck(600) -} - -func TestMempool_CheckTxChecksTxSize(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mempl, cleanup := newMempoolWithApp(cc) - defer cleanup() - - maxTxSize := mempl.config.MaxTxBytes - - testCases := []struct { - len int - err bool - }{ - // check small txs. 
no error - 0: {10, false}, - 1: {1000, false}, - 2: {1000000, false}, - - // check around maxTxSize - 3: {maxTxSize - 1, false}, - 4: {maxTxSize, false}, - 5: {maxTxSize + 1, true}, - } - - for i, testCase := range testCases { - caseString := fmt.Sprintf("case %d, len %d", i, testCase.len) - - tx := tmrand.Bytes(testCase.len) - - err := mempl.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - bv := gogotypes.BytesValue{Value: tx} - bz, err2 := bv.Marshal() - require.NoError(t, err2) - require.Equal(t, len(bz), proto.Size(&bv), caseString) - - if !testCase.err { - require.NoError(t, err, caseString) - } else { - require.Equal(t, err, types.ErrTxTooLarge{ - Max: maxTxSize, - Actual: testCase.len, - }, caseString) - } - } -} - -func TestMempoolTxsBytes(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - cfg := config.ResetTestRoot("mempool_test") - cfg.Mempool.MaxTxsBytes = 10 - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - // 1. zero by default - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 2. len(tx) after CheckTx - err := mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 1, mp.SizeBytes()) - - // 3. zero again after tx is removed by Update - err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 4. zero after Flush - err = mp.CheckTx(context.Background(), []byte{0x02, 0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 2, mp.SizeBytes()) - - mp.Flush() - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached. 
- err = mp.CheckTx( - context.Background(), - []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, - nil, - mempool.TxInfo{}, - ) - require.NoError(t, err) - - err = mp.CheckTx(context.Background(), []byte{0x05}, nil, mempool.TxInfo{}) - if assert.Error(t, err) { - assert.IsType(t, types.ErrMempoolIsFull{}, err) - } - - // 6. zero after tx is rechecked and removed due to not being valid anymore - app2 := kvstore.NewApplication() - cc = abciclient.NewLocalCreator(app2) - mp, cleanup = newMempoolWithApp(cc) - defer cleanup() - - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(0)) - - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - appConnCon, _ := cc() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err = appConnCon.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := appConnCon.Stop(); err != nil { - t.Error(err) - } - }) - ctx := context.Background() - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - require.NoError(t, err) - require.EqualValues(t, 0, res.Code) - res2, err := appConnCon.CommitSync(ctx) - require.NoError(t, err) - require.NotEmpty(t, res2.Data) - - // Pretend like we committed nothing so txBytes gets rechecked and removed. - err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - // 7. 
Test RemoveTxByKey function - err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 9, mp.SizeBytes()) - assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key())) - assert.EqualValues(t, 9, mp.SizeBytes()) - assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key())) - assert.EqualValues(t, 8, mp.SizeBytes()) - -} - -// This will non-deterministically catch some concurrency failures like -// https://github.com/tendermint/tendermint/issues/3509 -// TODO: all of the tests should probably also run using the remote proxy app -// since otherwise we're not actually testing the concurrency of the mempool here! -func TestMempoolRemoteAppConcurrency(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - app := kvstore.NewApplication() - cc, server := newRemoteApp(t, sockPath, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - cfg := config.ResetTestRoot("mempool_test") - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - // generate small number of txs - nTxs := 10 - txLen := 200 - txs := make([]types.Tx, nTxs) - for i := 0; i < nTxs; i++ { - txs[i] = tmrand.Bytes(txLen) - } - - // simulate a group of peers sending them over and over - N := cfg.Mempool.Size - maxPeers := 5 - for i := 0; i < N; i++ { - peerID := mrand.Intn(maxPeers) - txNum := mrand.Intn(nTxs) - tx := txs[txNum] - - // this will err with ErrTxInCache many times ... 
- mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error - } - err := mp.FlushAppConn() - require.NoError(t, err) -} - -// caller must close server -func newRemoteApp( - t *testing.T, - addr string, - app abci.Application, -) ( - clientCreator abciclient.Creator, - server service.Service, -) { - clientCreator = abciclient.NewRemoteCreator(addr, "socket", true) - - // Start server - server = abciserver.NewSocketServer(addr, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - return clientCreator, server -} - -func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx { - responses := make([]*abci.ResponseDeliverTx, 0, n) - for i := 0; i < n; i++ { - responses = append(responses, &abci.ResponseDeliverTx{Code: code}) - } - return responses -} diff --git a/internal/mempool/v0/doc.go b/internal/mempool/v0/doc.go deleted file mode 100644 index 3b5d0d20d4..0000000000 --- a/internal/mempool/v0/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// The mempool pushes new txs onto the proxyAppConn. -// It gets a stream of (req, res) tuples from the proxy. -// The mempool stores good txs in a concurrent linked-list. - -// Multiple concurrent go-routines can traverse this linked-list -// safely by calling .NextWait() on each element. - -// So we have several go-routines: -// 1. Consensus calling Update() and ReapMaxBytesMaxGas() synchronously -// 2. Many mempool reactor's peer routines calling CheckTx() -// 3. Many mempool reactor's peer routines traversing the txs linked list - -// To manage these goroutines, there are three methods of locking. -// 1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe) -// 2. Mutations to the linked-list elements are atomic -// 3. 
CheckTx() and/or ReapMaxBytesMaxGas() calls can be paused upon Update(), protected by .updateMtx - -// Garbage collection of old elements from mempool.txs is handlde via the -// DetachPrev() call, which makes old elements not reachable by peer -// broadcastTxRoutine(). - -// TODO: Better handle abci client errors. (make it automatically handle connection errors) -package v0 diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go deleted file mode 100644 index 1183216457..0000000000 --- a/internal/mempool/v0/reactor.go +++ /dev/null @@ -1,392 +0,0 @@ -package v0 - -import ( - "context" - "errors" - "fmt" - "runtime/debug" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -var ( - _ service.Service = (*Reactor)(nil) - _ p2p.Wrapper = (*protomem.Message)(nil) -) - -// PeerManager defines the interface contract required for getting necessary -// peer information. This should eventually be replaced with a message-oriented -// approach utilizing the p2p stack. -type PeerManager interface { - GetHeight(types.NodeID) int64 -} - -// Reactor implements a service that contains mempool of txs that are broadcasted -// amongst peers. It maintains a map from peer ID to counter, to prevent gossiping -// txs to the peers you received it from. -type Reactor struct { - service.BaseService - - cfg *config.MempoolConfig - mempool *CListMempool - ids *mempool.IDs - - // XXX: Currently, this is the only way to get information about a peer. Ideally, - // we rely on message-oriented communication to get necessary peer data. 
- // ref: https://github.com/tendermint/tendermint/issues/5670 - peerMgr PeerManager - - mempoolCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} - - // peerWG is used to coordinate graceful termination of all peer broadcasting - // goroutines. - peerWG sync.WaitGroup - - mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer -} - -// NewReactor returns a reference to a new reactor. -func NewReactor( - logger log.Logger, - cfg *config.MempoolConfig, - peerMgr PeerManager, - mp *CListMempool, - mempoolCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, -) *Reactor { - - r := &Reactor{ - cfg: cfg, - peerMgr: peerMgr, - mempool: mp, - ids: mempool.NewMempoolIDs(), - mempoolCh: mempoolCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), - } - - r.BaseService = *service.NewBaseService(logger, "Mempool", r) - return r -} - -// GetChannelDescriptor produces an instance of a descriptor for this -// package's required channels. -func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { - largestTx := make([]byte, cfg.MaxTxBytes) - batchMsg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, - }, - } - - return &p2p.ChannelDescriptor{ - ID: mempool.MempoolChannel, - MessageType: new(protomem.Message), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - } -} - -// OnStart starts separate go routines for each p2p Channel and listens for -// envelopes on each. In addition, it also listens for peer updates and handles -// messages on that p2p channel accordingly. The caller must be sure to execute -// OnStop to ensure the outbound p2p Channels are closed. 
-func (r *Reactor) OnStart() error { - if !r.cfg.Broadcast { - r.Logger.Info("tx broadcasting is disabled") - } - - go r.processMempoolCh() - go r.processPeerUpdates() - - return nil -} - -// OnStop stops the reactor by signaling to all spawned goroutines to exit and -// blocking until they all exit. -func (r *Reactor) OnStop() { - r.mtx.Lock() - for _, c := range r.peerRoutines { - c.Close() - } - r.mtx.Unlock() - - // wait for all spawned peer tx broadcasting goroutines to gracefully exit - r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.mempoolCh.Done() - <-r.peerUpdates.Done() -} - -// handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. -// For every tx in the message, we execute CheckTx. It returns an error if an -// empty set of txs are sent in an envelope or if we receive an unexpected -// message type. -func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) - - switch msg := envelope.Message.(type) { - case *protomem.Txs: - protoTxs := msg.GetTxs() - if len(protoTxs) == 0 { - return errors.New("empty txs received from peer") - } - - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} - if len(envelope.From) != 0 { - txInfo.SenderNodeID = envelope.From - } - - for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) - } - } - - default: - return fmt.Errorf("received unknown message: %T", msg) - } - - return nil -} - -// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. 
-// It will handle errors and any possible panics gracefully. A caller can handle -// any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( - "recovering from processing message panic", - "err", err, - "stack", string(debug.Stack()), - ) - } - }() - - r.Logger.Debug("received message", "peer", envelope.From) - - switch chID { - case mempool.MempoolChannel: - err = r.handleMempoolMessage(envelope) - - default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) - } - - return err -} - -// processMempoolCh implements a blocking event loop where we listen for p2p -// Envelope messages from the mempoolCh. -func (r *Reactor) processMempoolCh() { - defer r.mempoolCh.Close() - - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } - } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on mempool channel; closing...") - return - } - } -} - -// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we -// check if the reactor is running and if we've already started a tx broadcasting -// goroutine or not. If not, we start one for the newly added peer. For down or -// removed peers, we remove the peer from the mempool peer ID set and signal to -// stop the tx broadcasting goroutine. 
-func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) - - r.mtx.Lock() - defer r.mtx.Unlock() - - switch peerUpdate.Status { - case p2p.PeerStatusUp: - // Do not allow starting new tx broadcast loops after reactor shutdown - // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. - if !r.IsRunning() { - return - } - - if r.cfg.Broadcast { - // Check if we've already started a goroutine for this peer, if not we create - // a new done channel so we can explicitly close the goroutine if the peer - // is later removed, we increment the waitgroup so the reactor can stop - // safely, and finally start the goroutine to broadcast txs to that peer. - _, ok := r.peerRoutines[peerUpdate.NodeID] - if !ok { - closer := tmsync.NewCloser() - - r.peerRoutines[peerUpdate.NodeID] = closer - r.peerWG.Add(1) - - r.ids.ReserveForPeer(peerUpdate.NodeID) - - // start a broadcast routine ensuring all txs are forwarded to the peer - go r.broadcastTxRoutine(peerUpdate.NodeID, closer) - } - } - - case p2p.PeerStatusDown: - r.ids.Reclaim(peerUpdate.NodeID) - - // Check if we've started a tx broadcasting goroutine for this peer. - // If we have, we signal to terminate the goroutine via the channel's closure. - // This will internally decrement the peer waitgroup and remove the peer - // from the map of peer tx broadcasting goroutines. - closer, ok := r.peerRoutines[peerUpdate.NodeID] - if ok { - closer.Close() - } - } -} - -// processPeerUpdates initiates a blocking process where we listen for and handle -// PeerUpdate messages. When the reactor is stopped, we will catch the signal and -// close the p2p PeerUpdatesCh gracefully. 
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - - for { - select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") - return - } - } -} - -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { - peerMempoolID := r.ids.GetForPeer(peerID) - var next *clist.CElement - - // remove the peer ID from the map of routines and mark the waitgroup as done - defer func() { - r.mtx.Lock() - delete(r.peerRoutines, peerID) - r.mtx.Unlock() - - r.peerWG.Done() - - if e := recover(); e != nil { - r.Logger.Error( - "recovering from broadcasting mempool loop", - "err", e, - "stack", string(debug.Stack()), - ) - } - }() - - for { - if !r.IsRunning() { - return - } - - // This happens because the CElement we were looking at got garbage - // collected (removed). That is, .NextWait() returned nil. Go ahead and - // start from the beginning. - if next == nil { - select { - case <-r.mempool.TxsWaitChan(): // wait until a tx is available - if next = r.mempool.TxsFront(); next == nil { - continue - } - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return - } - } - - memTx := next.Value.(*mempoolTx) - - if r.peerMgr != nil { - height := r.peerMgr.GetHeight(peerID) - if height > 0 && height < memTx.Height()-1 { - // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - - // NOTE: Transaction batching was disabled due to: - // https://github.com/tendermint/tendermint/issues/5796 - - if _, ok := memTx.senders.Load(peerMempoolID); !ok { - // Send the mempool tx to the corresponding peer. 
Note, the peer may be - // behind and thus would not be able to process the mempool tx correctly. - r.mempoolCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{memTx.tx}, - }, - } - r.Logger.Debug( - "gossiped tx to peer", - "tx", fmt.Sprintf("%X", memTx.tx.Hash()), - "peer", peerID, - ) - } - - select { - case <-next.NextWaitChan(): - // see the start of the for loop for nil check - next = next.Next() - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return - } - } -} diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go deleted file mode 100644 index 69582284b1..0000000000 --- a/internal/mempool/v0/reactor_test.go +++ /dev/null @@ -1,392 +0,0 @@ -package v0 - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/p2ptest" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -type reactorTestSuite struct { - network *p2ptest.Network - logger log.Logger - - reactors map[types.NodeID]*Reactor - mempoolChnnels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*CListMempool - kvstores map[types.NodeID]*kvstore.Application - - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates 
map[types.NodeID]*p2p.PeerUpdates - - nodes []types.NodeID -} - -func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { - t.Helper() - - rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*CListMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - } - - chDesc := GetChannelDescriptor(config) - chDesc.RecvBufferCapacity = int(chBuf) - rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc) - - for nodeID := range rts.network.Nodes { - rts.kvstores[nodeID] = kvstore.NewApplication() - cc := abciclient.NewLocalCreator(rts.kvstores[nodeID]) - - mempool, memCleanup := newMempoolWithApp(cc) - t.Cleanup(memCleanup) - mempool.SetLogger(rts.logger) - rts.mempools[nodeID] = mempool - - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - config, - rts.network.Nodes[nodeID].PeerManager, - mempool, - rts.mempoolChnnels[nodeID], - rts.peerUpdates[nodeID], - ) - - rts.nodes = append(rts.nodes, nodeID) - - require.NoError(t, rts.reactors[nodeID].Start()) - require.True(t, rts.reactors[nodeID].IsRunning()) - } - - require.Len(t, rts.reactors, numNodes) - - t.Cleanup(func() { - for nodeID := range rts.reactors { - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.False(t, rts.reactors[nodeID].IsRunning()) - } - } - }) - - return rts -} 
- -func (rts *reactorTestSuite) start(t *testing.T) { - t.Helper() - rts.network.Start(t) - require.Len(t, - rts.network.RandomNode().PeerManager.Peers(), - len(rts.nodes)-1, - "network does not have expected number of nodes") -} - -func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { - t.Helper() - - for id, r := range rts.reactors { - require.NoError(t, r.Stop(), "stopping reactor %s", id) - r.Wait() - require.False(t, r.IsRunning(), "reactor %s did not stop", id) - } - - for _, mch := range rts.mempoolChnnels { - require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out)) - } -} - -func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) { - t.Helper() - - fn := func(pool *CListMempool) { - for pool.Size() < len(txs) { - time.Sleep(50 * time.Millisecond) - } - - reapedTxs := pool.ReapMaxTxs(len(txs)) - require.Equal(t, len(txs), len(reapedTxs)) - for i, tx := range txs { - require.Equalf(t, - tx, - reapedTxs[i], - "txs at index %d in reactor mempool mismatch; got: %v, expected: %v", i, tx, reapedTxs[i], - ) - } - } - - if len(ids) == 1 { - fn(rts.reactors[ids[0]].mempool) - return - } - - wg := &sync.WaitGroup{} - for id := range rts.mempools { - if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) { - continue - } - - wg.Add(1) - func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) - } - - wg.Wait() -} - -func TestReactorBroadcastTxs(t *testing.T) { - numTxs := 1000 - numNodes := 10 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondaries := rts.nodes[1:] - - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) - - // run the router - rts.start(t) - - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). - rts.waitForTxns(t, txs, secondaries...) 
- - for _, pool := range rts.mempools { - require.Equal(t, len(txs), pool.Size()) - } - - rts.assertMempoolChannelsDrained(t) -} - -// regression test for https://github.com/tendermint/tendermint/issues/5408 -func TestReactorConcurrency(t *testing.T) { - numTxs := 5 - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - rts.start(t) - - var wg sync.WaitGroup - - for i := 0; i < 1000; i++ { - wg.Add(2) - - // 1. submit a bunch of txs - // 2. update the whole mempool - - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) - go func() { - defer wg.Done() - - mempool := rts.mempools[primary] - - mempool.Lock() - defer mempool.Unlock() - - deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) - for i := range txs { - deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} - } - - require.NoError(t, mempool.Update(1, txs, deliverTxResponses, nil, nil)) - }() - - // 1. submit a bunch of txs - // 2. 
update none - _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID) - go func() { - defer wg.Done() - - mempool := rts.mempools[secondary] - - mempool.Lock() - defer mempool.Unlock() - - err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) - require.NoError(t, err) - }() - - // flush the mempool - rts.mempools[secondary].Flush() - } - - wg.Wait() -} - -func TestReactorNoBroadcastToSender(t *testing.T) { - numTxs := 1000 - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, uint(numTxs)) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - peerID := uint16(1) - _ = checkTxs(t, rts.mempools[primary], numTxs, peerID) - - rts.start(t) - - time.Sleep(100 * time.Millisecond) - - require.Eventually(t, func() bool { - return rts.mempools[secondary].Size() == 0 - }, time.Minute, 100*time.Millisecond) - - rts.assertMempoolChannelsDrained(t) -} - -func TestReactor_MaxTxBytes(t *testing.T) { - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - // Broadcast a tx, which has the max size and ensure it's received by the - // second reactor. - tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) - err := rts.reactors[primary].mempool.CheckTx( - context.Background(), - tx1, - nil, - mempool.TxInfo{ - SenderID: mempool.UnknownPeerID, - }, - ) - require.NoError(t, err) - - rts.start(t) - - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). 
- rts.waitForTxns(t, []types.Tx{tx1}, secondary) - - rts.reactors[primary].mempool.Flush() - rts.reactors[secondary].mempool.Flush() - - // broadcast a tx, which is beyond the max size and ensure it's not sent - tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) - err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) - require.Error(t, err) - - rts.assertMempoolChannelsDrained(t) -} - -func TestDontExhaustMaxActiveIDs(t *testing.T) { - cfg := config.TestConfig() - - // we're creating a single node network, but not starting the - // network. - rts := setup(t, cfg.Mempool, 1, mempool.MaxActiveIDs+1) - - nodeID := rts.nodes[0] - - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - // ensure the reactor does not panic (i.e. exhaust active IDs) - for i := 0; i < mempool.MaxActiveIDs+1; i++ { - rts.peerChans[nodeID] <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: peerID, - } - - rts.mempoolChnnels[nodeID].Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{}, - }, - } - } - - require.Eventually( - t, - func() bool { - for _, mch := range rts.mempoolChnnels { - if len(mch.Out) > 0 { - return false - } - } - - return true - }, - time.Minute, - 10*time.Millisecond, - ) - - rts.assertMempoolChannelsDrained(t) -} - -func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - // 0 is already reserved for UnknownPeerID - ids := mempool.NewMempoolIDs() - - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - for i := 0; i < mempool.MaxActiveIDs-1; i++ { - ids.ReserveForPeer(peerID) - } - - require.Panics(t, func() { - ids.ReserveForPeer(peerID) - }) -} - -func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - cfg := 
config.TestConfig() - - rts := setup(t, cfg.Mempool, 2, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - rts.start(t) - - // disconnect peer - rts.peerChans[primary] <- p2p.PeerUpdate{ - Status: p2p.PeerStatusDown, - NodeID: secondary, - } -} diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go deleted file mode 100644 index a12fbc51ba..0000000000 --- a/internal/mempool/v1/mempool.go +++ /dev/null @@ -1,856 +0,0 @@ -package v1 - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync/atomic" - "time" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" -) - -var _ mempool.Mempool = (*TxMempool)(nil) - -// TxMempoolOption sets an optional parameter on the TxMempool. -type TxMempoolOption func(*TxMempool) - -// TxMempool defines a prioritized mempool data structure used by the v1 mempool -// reactor. It keeps a thread-safe priority queue of transactions that is used -// when a block proposer constructs a block and a thread-safe linked-list that -// is used to gossip transactions to peers in a FIFO manner. 
-type TxMempool struct { - logger log.Logger - metrics *mempool.Metrics - config *config.MempoolConfig - proxyAppConn proxy.AppConnMempool - - // txsAvailable fires once for each height when the mempool is not empty - txsAvailable chan struct{} - notifiedTxsAvailable bool - - // height defines the last block height process during Update() - height int64 - - // sizeBytes defines the total size of the mempool (sum of all tx bytes) - sizeBytes int64 - - // cache defines a fixed-size cache of already seen transactions as this - // reduces pressure on the proxyApp. - cache mempool.TxCache - - // txStore defines the main storage of valid transactions. Indexes are built - // on top of this store. - txStore *TxStore - - // gossipIndex defines the gossiping index of valid transactions via a - // thread-safe linked-list. We also use the gossip index as a cursor for - // rechecking transactions already in the mempool. - gossipIndex *clist.CList - - // recheckCursor and recheckEnd are used as cursors based on the gossip index - // to recheck transactions that are already in the mempool. Iteration is not - // thread-safe and transaction may be mutated in serial order. - // - // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for - // iterator and cursor management when rechecking transactions. If the gossip - // index changes or is removed in a future refactor, this will have to be - // refactored. Instead, we should consider just keeping a slice of a snapshot - // of the mempool's current transactions during Update and an integer cursor - // into that slice. This, however, requires additional O(n) space complexity. - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // priorityIndex defines the priority index of valid transactions via a - // thread-safe priority queue. - priorityIndex *TxPriorityQueue - - // heightIndex defines a height-based, in ascending order, transaction index. - // i.e. 
older transactions are first. - heightIndex *WrappedTxList - - // timestampIndex defines a timestamp-based, in ascending order, transaction - // index. i.e. older transactions are first. - timestampIndex *WrappedTxList - - // A read/write lock is used to safe guard updates, insertions and deletions - // from the mempool. A read-lock is implicitly acquired when executing CheckTx, - // however, a caller must explicitly grab a write-lock via Lock when updating - // the mempool via Update(). - mtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc -} - -func NewTxMempool( - logger log.Logger, - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...TxMempoolOption, -) *TxMempool { - - txmp := &TxMempool{ - logger: logger, - config: cfg, - proxyAppConn: proxyAppConn, - height: height, - cache: mempool.NopTxCache{}, - metrics: mempool.NopMetrics(), - txStore: NewTxStore(), - gossipIndex: clist.New(), - priorityIndex: NewTxPriorityQueue(), - heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }), - timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) - }), - } - - if cfg.CacheSize > 0 { - txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } - - proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) - - for _, opt := range options { - opt(txmp) - } - - return txmp -} - -// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) -// returns an error. This is executed before CheckTx. It only applies to the -// first created block. After that, Update() overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a transaction if -// f(tx, resp) returns an error. This is executed after CheckTx. 
It only applies -// to the first created block. After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.postCheck = f } -} - -// WithMetrics sets the mempool's metrics collector. -func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { - return func(txmp *TxMempool) { txmp.metrics = metrics } -} - -// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly -// release the lock when finished. -func (txmp *TxMempool) Lock() { - txmp.mtx.Lock() -} - -// Unlock releases a write-lock on the mempool. -func (txmp *TxMempool) Unlock() { - txmp.mtx.Unlock() -} - -// Size returns the number of valid transactions in the mempool. It is -// thread-safe. -func (txmp *TxMempool) Size() int { - return txmp.txStore.Size() -} - -// SizeBytes return the total sum in bytes of all the valid transactions in the -// mempool. It is thread-safe. -func (txmp *TxMempool) SizeBytes() int64 { - return atomic.LoadInt64(&txmp.sizeBytes) -} - -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. -// -// NOTE: The caller must obtain a write-lock via Lock() prior to execution. -func (txmp *TxMempool) FlushAppConn() error { - return txmp.proxyAppConn.FlushSync(context.Background()) -} - -// WaitForNextTx returns a blocking channel that will be closed when the next -// valid transaction is available to gossip. It is thread-safe. -func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { - return txmp.gossipIndex.WaitChan() -} - -// NextGossipTx returns the next valid transaction to gossip. A caller must wait -// for WaitForNextTx to signal a transaction is available to gossip first. It is -// thread-safe. -func (txmp *TxMempool) NextGossipTx() *clist.CElement { - return txmp.gossipIndex.Front() -} - -// EnableTxsAvailable enables the mempool to trigger events when transactions -// are available on a block by block basis. 
-func (txmp *TxMempool) EnableTxsAvailable() { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() - - txmp.txsAvailable = make(chan struct{}, 1) -} - -// TxsAvailable returns a channel which fires once for every height, and only -// when transactions are available in the mempool. It is thread-safe. -func (txmp *TxMempool) TxsAvailable() <-chan struct{} { - return txmp.txsAvailable -} - -// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires -// a read-lock attempts to execute the application's CheckTx ABCI method via -// CheckTxAsync. We return an error if any of the following happen: -// -// - The CheckTxAsync execution fails. -// - The transaction already exists in the cache and we've already received the -// transaction from the peer. Otherwise, if it solely exists in the cache, we -// return nil. -// - The transaction size exceeds the maximum transaction size as defined by the -// configuration provided to the mempool. -// - The transaction fails Pre-Check (if it is defined). -// - The proxyAppConn fails, e.g. the buffer is full. -// -// If the mempool is full, we still execute CheckTx and attempt to find a lower -// priority transaction to evict. If such a transaction exists, we remove the -// lower priority transaction and add the new one with higher priority. -// -// NOTE: -// - The applications' CheckTx implementation may panic. -// - The caller is not to explicitly require any locks for executing CheckTx. 
-func (txmp *TxMempool) CheckTx( - ctx context.Context, - tx types.Tx, - cb func(*abci.Response), - txInfo mempool.TxInfo, -) error { - - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txSize := len(tx) - if txSize > txmp.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: txmp.config.MaxTxBytes, - Actual: txSize, - } - } - - if txmp.preCheck != nil { - if err := txmp.preCheck(tx); err != nil { - return types.ErrPreCheck{ - Reason: err, - } - } - } - - if err := txmp.proxyAppConn.Error(); err != nil { - return err - } - - txHash := tx.Key() - - // We add the transaction to the mempool's cache and if the transaction already - // exists, i.e. false is returned, then we check if we've seen this transaction - // from the same sender and error if we have. Otherwise, we return nil. - if !txmp.cache.Push(tx) { - wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) - if wtx != nil && ok { - // We already have the transaction stored and the we've already seen this - // transaction from txInfo.SenderID. 
- return types.ErrTxInCache - } - - txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - txmp.cache.Remove(tx) - return err - } - - reqRes.SetCallback(func(res *abci.Response) { - if txmp.recheckCursor != nil { - panic("recheck cursor is non-nil in CheckTx callback") - } - - wtx := &WrappedTx{ - tx: tx, - hash: txHash, - timestamp: time.Now().UTC(), - height: txmp.height, - } - txmp.initTxCallback(wtx, res, txInfo) - - if cb != nil { - cb(res) - } - }) - - return nil -} - -func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { - txmp.Lock() - defer txmp.Unlock() - - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { - txmp.removeTx(wtx, false) - return nil - } - - return errors.New("transaction not found") -} - -// Flush flushes out the mempool. It acquires a read-lock, fetches all the -// transactions currently in the transaction store and removes each transaction -// from the store and all indexes and finally resets the cache. -// -// NOTE: -// - Flushing the mempool may leave the mempool in an inconsistent state. -func (txmp *TxMempool) Flush() { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txmp.heightIndex.Reset() - txmp.timestampIndex.Reset() - - for _, wtx := range txmp.txStore.GetAllTxs() { - txmp.removeTx(wtx, false) - } - - atomic.SwapInt64(&txmp.sizeBytes, 0) - txmp.cache.Reset() -} - -// ReapMaxBytesMaxGas returns a list of transactions within the provided size -// and gas constraints. Transaction are retrieved in priority order. -// -// NOTE: -// - A read-lock is acquired. -// - Transactions returned are not actually removed from the mempool transaction -// store or indexes. 
-func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - var ( - totalGas int64 - totalSize int64 - ) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) - for txmp.priorityIndex.NumTxs() > 0 { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) - - // Ensure we have capacity for the transaction with respect to the - // transaction size. - if maxBytes > -1 && totalSize+size > maxBytes { - return txs[:len(txs)-1] - } - - totalSize += size - - // ensure we have capacity for the transaction with respect to total gas - gas := totalGas + wtx.gasWanted - if maxGas > -1 && gas > maxGas { - return txs[:len(txs)-1] - } - - totalGas = gas - } - - return txs -} - -// ReapMaxTxs returns a list of transactions within the provided number of -// transactions bound. Transaction are retrieved in priority order. -// -// NOTE: -// - A read-lock is acquired. -// - Transactions returned are not actually removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - numTxs := txmp.priorityIndex.NumTxs() - if max < 0 { - max = numTxs - } - - cap := tmmath.MinInt(numTxs, max) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. 
- wTxs := make([]*WrappedTx, 0, cap) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, cap) - for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - } - - return txs -} - -// Update iterates over all the transactions provided by the caller, i.e. the -// block producer, and removes them from the cache (if applicable) and removes -// the transactions from the main transaction store and associated indexes. -// Finally, if there are trainsactions remaining in the mempool, we initiate a -// re-CheckTx for them (if applicable), otherwise, we notify the caller more -// transactions are available. -// -// NOTE: -// - The caller must explicitly acquire a write-lock via Lock(). -func (txmp *TxMempool) Update( - blockHeight int64, - blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - newPreFn mempool.PreCheckFunc, - newPostFn mempool.PostCheckFunc, -) error { - - txmp.height = blockHeight - txmp.notifiedTxsAvailable = false - - if newPreFn != nil { - txmp.preCheck = newPreFn - } - if newPostFn != nil { - txmp.postCheck = newPostFn - } - - for i, tx := range blockTxs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { - // add the valid committed transaction to the cache (if missing) - _ = txmp.cache.Push(tx) - } else if !txmp.config.KeepInvalidTxsInCache { - // allow invalid transactions to be re-submitted - txmp.cache.Remove(tx) - } - - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { - txmp.removeTx(wtx, false) - } - } - - txmp.purgeExpiredTxs(blockHeight) - - // If there any uncommitted transactions left in the mempool, we either - // initiate re-CheckTx per remaining transaction or notify that remaining - // transactions are left. 
- if txmp.Size() > 0 { - if txmp.config.Recheck { - txmp.logger.Debug( - "executing re-CheckTx for all remaining transactions", - "num_txs", txmp.Size(), - "height", blockHeight, - ) - txmp.updateReCheckTxs() - } else { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) - return nil -} - -// initTxCallback performs the initial, i.e. the first, callback after CheckTx -// has been executed by the ABCI application. In other words, initTxCallback is -// called after executing CheckTx when we see a unique transaction for the first -// time. CheckTx can be called again for the same transaction at a later point -// in time when re-checking, however, this callback will not be called. -// -// After the ABCI application executes CheckTx, initTxCallback is called with -// the ABCI *Response object and TxInfo. If postCheck is defined on the mempool, -// we execute that first. If there is no error from postCheck (if defined) and -// the ABCI CheckTx response code is OK, we attempt to insert the transaction. -// -// When attempting to insert the transaction, we first check if there is -// sufficient capacity. If there is sufficient capacity, the transaction is -// inserted into the txStore and indexed across all indexes. Otherwise, if the -// mempool is full, we attempt to find a lower priority transaction to evict in -// place of the new incoming transaction. If no such transaction exists, the -// new incoming transaction is rejected. -// -// If the new incoming transaction fails CheckTx or postCheck fails, we reject -// the new incoming transaction. -// -// NOTE: -// - An explicit lock is NOT required. 
-func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) { - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if !ok { - return - } - - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) - } - - if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK { - // ignore bad transactions - txmp.logger.Info( - "rejected bad transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "peer_id", txInfo.SenderNodeID, - "code", checkTxRes.CheckTx.Code, - "post_check_err", err, - ) - - txmp.metrics.FailedTxs.Add(1) - - if !txmp.config.KeepInvalidTxsInCache { - txmp.cache.Remove(wtx.tx) - } - if err != nil { - checkTxRes.CheckTx.MempoolError = err.Error() - } - return - } - - sender := checkTxRes.CheckTx.Sender - priority := checkTxRes.CheckTx.Priority - - if len(sender) > 0 { - if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { - txmp.logger.Error( - "rejected incoming good transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "sender", sender, - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - } - - if err := txmp.canAddTx(wtx); err != nil { - evictTxs := txmp.priorityIndex.GetEvictableTxs( - priority, - int64(wtx.Size()), - txmp.SizeBytes(), - txmp.config.MaxTxsBytes, - ) - if len(evictTxs) == 0 { - // No room for the new incoming transaction so we just remove it from - // the cache. - txmp.cache.Remove(wtx.tx) - txmp.logger.Error( - "rejected incoming good transaction; mempool full", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err.Error(), - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - - // evict an existing transaction(s) - // - // NOTE: - // - The transaction, toEvict, can be removed while a concurrent - // reCheckTx callback is being executed for the same transaction. 
- for _, toEvict := range evictTxs { - txmp.removeTx(toEvict, true) - txmp.logger.Debug( - "evicted existing good transaction; mempool full", - "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), - "old_priority", toEvict.priority, - "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "new_priority", wtx.priority, - ) - txmp.metrics.EvictedTxs.Add(1) - } - } - - wtx.gasWanted = checkTxRes.CheckTx.GasWanted - wtx.priority = priority - wtx.sender = sender - wtx.peers = map[uint16]struct{}{ - txInfo.SenderID: {}, - } - - txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) - txmp.metrics.Size.Set(float64(txmp.Size())) - - txmp.insertTx(wtx) - txmp.logger.Debug( - "inserted good transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "height", txmp.height, - "num_txs", txmp.Size(), - ) - txmp.notifyTxsAvailable() - -} - -// defaultTxCallback performs the default CheckTx application callback. This is -// NOT executed when a transaction is first seen/received. Instead, this callback -// is executed during re-checking transactions (if enabled). A caller, i.e a -// block proposer, acquires a mempool write-lock via Lock() and when executing -// Update(), if the mempool is non-empty and Recheck is enabled, then all -// remaining transactions will be rechecked via CheckTxAsync. The order in which -// they are rechecked must be the same order in which this callback is called -// per transaction. -func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) { - if txmp.recheckCursor == nil { - return - } - - txmp.metrics.RecheckTimes.Add(1) - - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if ok { - tx := req.GetCheckTx().Tx - wtx := txmp.recheckCursor.Value.(*WrappedTx) - if !bytes.Equal(tx, wtx.tx) { - panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), types.Tx(tx).Key())) - } - - // Only evaluate transactions that have not been removed. 
This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, checkTxRes.CheckTx) - } - - if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { - wtx.priority = checkTxRes.CheckTx.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err, - "code", checkTxRes.CheckTx.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } - } - - // move reCheckTx cursor to next element - if txmp.recheckCursor == txmp.recheckEnd { - txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() - } - - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") - - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) - } -} - -// updateReCheckTxs updates the recheck cursors by using the gossipIndex. For -// each transaction, it executes CheckTxAsync. The global callback defined on -// the proxyAppConn will be executed for each transaction after CheckTx is -// executed. -// -// NOTE: -// - The caller must have a write-lock when executing updateReCheckTxs. -func (txmp *TxMempool) updateReCheckTxs() { - if txmp.Size() == 0 { - panic("attempted to update re-CheckTx txs when mempool is empty") - } - - txmp.recheckCursor = txmp.gossipIndex.Front() - txmp.recheckEnd = txmp.gossipIndex.Back() - ctx := context.Background() - - for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { - wtx := e.Value.(*WrappedTx) - - // Only execute CheckTx if the transaction is not marked as removed which - // could happen if the transaction was evicted. 
- if !txmp.txStore.IsTxRemoved(wtx.hash) { - _, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ - Tx: wtx.tx, - Type: abci.CheckTxType_Recheck, - }) - if err != nil { - // no need in retrying since the tx will be rechecked after the next block - txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) - } - } - } - - if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil { - txmp.logger.Error("failed to flush transactions during rechecking", "err", err) - } -} - -// canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. Otherwise, nil is returned -// and the transaction can be inserted into the mempool. -func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { - var ( - numTxs = txmp.Size() - sizeBytes = txmp.SizeBytes() - ) - - if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: numTxs, - MaxTxs: txmp.config.Size, - TxsBytes: sizeBytes, - MaxTxsBytes: txmp.config.MaxTxsBytes, - } - } - - return nil -} - -func (txmp *TxMempool) insertTx(wtx *WrappedTx) { - txmp.txStore.SetTx(wtx) - txmp.priorityIndex.PushTx(wtx) - txmp.heightIndex.Insert(wtx) - txmp.timestampIndex.Insert(wtx) - - // Insert the transaction into the gossip index and mark the reference to the - // linked-list element, which will be needed at a later point when the - // transaction is removed. - gossipEl := txmp.gossipIndex.PushBack(wtx) - wtx.gossipEl = gossipEl - - atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) -} - -func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { - if txmp.txStore.IsTxRemoved(wtx.hash) { - return - } - - txmp.txStore.RemoveTx(wtx) - txmp.priorityIndex.RemoveTx(wtx) - txmp.heightIndex.Remove(wtx) - txmp.timestampIndex.Remove(wtx) - - // Remove the transaction from the gossip index and cleanup the linked-list - // element so it can be garbage collected. 
- txmp.gossipIndex.Remove(wtx.gossipEl) - wtx.gossipEl.DetachPrev() - - atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) - - if removeFromCache { - txmp.cache.Remove(wtx.tx) - } -} - -// purgeExpiredTxs removes all transactions that have exceeded their respective -// height and/or time based TTLs from their respective indexes. Every expired -// transaction will be removed from the mempool entirely, except for the cache. -// -// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which -// the caller has a write-lock on the mempool and so we can safely iterate over -// the height and time based indexes. -func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { - now := time.Now() - expiredTxs := make(map[types.TxKey]*WrappedTx) - - if txmp.config.TTLNumBlocks > 0 { - purgeIdx := -1 - for i, wtx := range txmp.heightIndex.txs { - if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be be purged - break - } - } - - if purgeIdx >= 0 { - txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] - } - } - - if txmp.config.TTLDuration > 0 { - purgeIdx := -1 - for i, wtx := range txmp.timestampIndex.txs { - if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be be purged - break - } - } - - if purgeIdx >= 0 { - txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] - } - } - - for _, wtx := range expiredTxs { - txmp.removeTx(wtx, false) - } -} - -func (txmp *TxMempool) notifyTxsAvailable() { - if txmp.Size() == 0 { - panic("attempt to notify txs available but mempool is empty!") - } - - if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - // channel cap is 1, so this will send once - txmp.notifiedTxsAvailable = true - - select { - case txmp.txsAvailable <- struct{}{}: - default: - } - } -} diff 
--git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go deleted file mode 100644 index 56e6057a1b..0000000000 --- a/internal/mempool/v1/reactor_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package v1 - -import ( - "os" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/p2ptest" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -type reactorTestSuite struct { - network *p2ptest.Network - logger log.Logger - - reactors map[types.NodeID]*Reactor - mempoolChannels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*TxMempool - kvstores map[types.NodeID]*kvstore.Application - - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates - - nodes []types.NodeID -} - -func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { - t.Helper() - - cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) - t.Cleanup(func() { - os.RemoveAll(cfg.RootDir) - }) - - rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*TxMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - } - - chDesc := GetChannelDescriptor(cfg.Mempool) - rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) - - for nodeID := range rts.network.Nodes { - 
rts.kvstores[nodeID] = kvstore.NewApplication() - - mempool := setup(t, 0) - rts.mempools[nodeID] = mempool - - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - cfg.Mempool, - rts.network.Nodes[nodeID].PeerManager, - mempool, - rts.mempoolChannels[nodeID], - rts.peerUpdates[nodeID], - ) - - rts.nodes = append(rts.nodes, nodeID) - - require.NoError(t, rts.reactors[nodeID].Start()) - require.True(t, rts.reactors[nodeID].IsRunning()) - } - - require.Len(t, rts.reactors, numNodes) - - t.Cleanup(func() { - for nodeID := range rts.reactors { - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.False(t, rts.reactors[nodeID].IsRunning()) - } - } - }) - - return rts -} - -func (rts *reactorTestSuite) start(t *testing.T) { - t.Helper() - rts.network.Start(t) - require.Len(t, - rts.network.RandomNode().PeerManager.Peers(), - len(rts.nodes)-1, - "network does not have expected number of nodes") -} - -func TestReactorBroadcastDoesNotPanic(t *testing.T) { - numNodes := 2 - rts := setupReactors(t, numNodes, 0) - - observePanic := func(r interface{}) { - t.Fatal("panic detected in reactor") - } - - primary := rts.nodes[0] - secondary := rts.nodes[1] - primaryReactor := rts.reactors[primary] - primaryMempool := primaryReactor.mempool - secondaryReactor := rts.reactors[secondary] - - primaryReactor.observePanic = observePanic - secondaryReactor.observePanic = observePanic - - firstTx := &WrappedTx{} - primaryMempool.insertTx(firstTx) - - // run the router - rts.start(t) - - closer := tmsync.NewCloser() - primaryReactor.peerWG.Add(1) - go primaryReactor.broadcastTxRoutine(secondary, closer) - - wg := &sync.WaitGroup{} - for i := 0; i < 50; i++ { - next := &WrappedTx{} - wg.Add(1) - go func() { - defer wg.Done() - 
primaryMempool.insertTx(next) - }() - } - - err := primaryReactor.Stop() - require.NoError(t, err) - primaryReactor.peerWG.Wait() - wg.Wait() -} diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go deleted file mode 100644 index c5b7ca82f6..0000000000 --- a/internal/mempool/v1/tx.go +++ /dev/null @@ -1,281 +0,0 @@ -package v1 - -import ( - "sort" - "time" - - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" -) - -// WrappedTx defines a wrapper around a raw transaction with additional metadata -// that is used for indexing. -type WrappedTx struct { - // tx represents the raw binary transaction data - tx types.Tx - - // hash defines the transaction hash and the primary key used in the mempool - hash types.TxKey - - // height defines the height at which the transaction was validated at - height int64 - - // gasWanted defines the amount of gas the transaction sender requires - gasWanted int64 - - // priority defines the transaction's priority as specified by the application - // in the ResponseCheckTx response. - priority int64 - - // sender defines the transaction's sender as specified by the application in - // the ResponseCheckTx response. - sender string - - // timestamp is the time at which the node first received the transaction from - // a peer. It is used as a second dimension is prioritizing transactions when - // two transactions have the same priority. - timestamp time.Time - - // peers records a mapping of all peers that sent a given transaction - peers map[uint16]struct{} - - // heapIndex defines the index of the item in the heap - heapIndex int - - // gossipEl references the linked-list element in the gossip index - gossipEl *clist.CElement - - // removed marks the transaction as removed from the mempool. 
This is set - // during RemoveTx and is needed due to the fact that a given existing - // transaction in the mempool can be evicted when it is simultaneously having - // a reCheckTx callback executed. - removed bool -} - -func (wtx *WrappedTx) Size() int { - return len(wtx.tx) -} - -// TxStore implements a thread-safe mapping of valid transaction(s). -// -// NOTE: -// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative -// access is not allowed. Regardless, it is not expected for the mempool to -// need mutative access. -type TxStore struct { - mtx tmsync.RWMutex - hashTxs map[types.TxKey]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application -} - -func NewTxStore() *TxStore { - return &TxStore{ - senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[types.TxKey]*WrappedTx), - } -} - -// Size returns the total number of transactions in the store. -func (txs *TxStore) Size() int { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return len(txs.hashTxs) -} - -// GetAllTxs returns all the transactions currently in the store. -func (txs *TxStore) GetAllTxs() []*WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wTxs := make([]*WrappedTx, len(txs.hashTxs)) - i := 0 - for _, wtx := range txs.hashTxs { - wTxs[i] = wtx - i++ - } - - return wTxs -} - -// GetTxBySender returns a *WrappedTx by the transaction's sender property -// defined by the ABCI application. -func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.senderTxs[sender] -} - -// GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.hashTxs[hash] -} - -// IsTxRemoved returns true if a transaction by hash is marked as removed and -// false otherwise. 
-func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx, ok := txs.hashTxs[hash] - if ok { - return wtx.removed - } - - return false -} - -// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a -// non-empty sender, we additionally store the transaction by the sender as -// defined by the ABCI application. -func (txs *TxStore) SetTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - txs.senderTxs[wtx.sender] = wtx - } - - txs.hashTxs[wtx.tx.Key()] = wtx -} - -// RemoveTx removes a *WrappedTx from the transaction store. It deletes all -// indexes of the transaction. -func (txs *TxStore) RemoveTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - delete(txs.senderTxs, wtx.sender) - } - - delete(txs.hashTxs, wtx.tx.Key()) - wtx.removed = true -} - -// TxHasPeer returns true if a transaction by hash has a given peer ID and false -// otherwise. If the transaction does not exist, false is returned. -func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return false - } - - _, ok := wtx.peers[peerID] - return ok -} - -// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the -// given peerID to the WrappedTx's set of peers that sent us this transaction. -// We return true if we've already recorded the given peer for this transaction -// and false otherwise. If the transaction does not exist by hash, we return -// (nil, false). 
-func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return nil, false - } - - if wtx.peers == nil { - wtx.peers = make(map[uint16]struct{}) - } - - if _, ok := wtx.peers[peerID]; ok { - return wtx, true - } - - wtx.peers[peerID] = struct{}{} - return wtx, false -} - -// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be -// used to build generic transaction indexes in the mempool. It accepts a -// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx -// references which is used during Insert in order to determine sorted order. If -// less returns true, a <= b. -type WrappedTxList struct { - mtx tmsync.RWMutex - txs []*WrappedTx - less func(*WrappedTx, *WrappedTx) bool -} - -func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { - return &WrappedTxList{ - txs: make([]*WrappedTx, 0), - less: less, - } -} - -// Size returns the number of WrappedTx objects in the list. -func (wtl *WrappedTxList) Size() int { - wtl.mtx.RLock() - defer wtl.mtx.RUnlock() - - return len(wtl.txs) -} - -// Reset resets the list of transactions to an empty list. -func (wtl *WrappedTxList) Reset() { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - wtl.txs = make([]*WrappedTx, 0) -} - -// Insert inserts a WrappedTx reference into the sorted list based on the list's -// comparator function. -func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - if i == len(wtl.txs) { - // insert at the end - wtl.txs = append(wtl.txs, wtx) - return - } - - // Make space for the inserted element by shifting values at the insertion - // index up one index. - // - // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). 
- wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) - wtl.txs[i] = wtx -} - -// Remove attempts to remove a WrappedTx from the sorted list. -func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - // Since the list is sorted, we evaluate all elements starting at i. Note, if - // the element does not exist, we may potentially evaluate the entire remainder - // of the list. However, a caller should not be expected to call Remove with a - // non-existing element. - for i < len(wtl.txs) { - if wtl.txs[i] == wtx { - wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) - return - } - - i++ - } -} diff --git a/internal/p2p/address_test.go b/internal/p2p/address_test.go index 2745faf736..dfa3fb1e58 100644 --- a/internal/p2p/address_test.go +++ b/internal/p2p/address_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "net" "strings" "testing" @@ -204,6 +205,9 @@ func TestParseNodeAddress(t *testing.T) { func TestNodeAddress_Resolve(t *testing.T) { id := types.NodeID("00112233445566778899aabbccddeeff00112233") + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + testcases := []struct { address p2p.NodeAddress expect p2p.Endpoint @@ -272,15 +276,18 @@ func TestNodeAddress_Resolve(t *testing.T) { {p2p.NodeAddress{Protocol: "memory", Path: string(id)}, p2p.Endpoint{}, false}, {p2p.NodeAddress{Protocol: "tcp", Hostname: "💥"}, p2p.Endpoint{}, false}, } - for _, tc := range testcases { + for i, tc := range testcases { tc := tc t.Run(tc.address.String(), func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + endpoints, err := tc.address.Resolve(ctx) if !tc.ok { require.Error(t, err) return } - require.Contains(t, endpoints, tc.expect) + require.Contains(t, endpoints, tc.expect, i) }) } } diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go new file mode 100644 index 
0000000000..1faa2a6d06 --- /dev/null +++ b/internal/p2p/channel.go @@ -0,0 +1,208 @@ +package p2p + +import ( + "context" + "fmt" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/types" +) + +// Envelope contains a message with sender/receiver routing info. +type Envelope struct { + From types.NodeID // sender (empty if outbound) + To types.NodeID // receiver (empty if inbound) + Broadcast bool // send to all connected peers (ignores To) + Message proto.Message // message payload + ChannelID ChannelID +} + +// Wrapper is a Protobuf message that can contain a variety of inner messages +// (e.g. via oneof fields). If a Channel's message type implements Wrapper, the +// Router will automatically wrap outbound messages and unwrap inbound messages, +// such that reactors do not have to do this themselves. +type Wrapper interface { + proto.Message + + // Wrap will take a message and wrap it in this one if possible. + Wrap(proto.Message) error + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} + +// PeerError is a peer error reported via Channel.Error. +// +// FIXME: This currently just disconnects the peer, which is too simplistic. +// For example, some errors should be logged, some should cause disconnects, +// and some should ban the peer. +// +// FIXME: This should probably be replaced by a more general PeerBehavior +// concept that can mark good and bad behavior and contributes to peer scoring. +// It should possibly also allow reactors to request explicit actions, e.g. +// disconnection or banning, in addition to doing this based on aggregates. +type PeerError struct { + NodeID types.NodeID + Err error +} + +func (pe PeerError) Error() string { return fmt.Sprintf("peer=%q: %s", pe.NodeID, pe.Err.Error()) } +func (pe PeerError) Unwrap() error { return pe.Err } + +// Channel is a bidirectional channel to exchange Protobuf messages with peers. 
+// Each message is wrapped in an Envelope to specify its sender and receiver. +type Channel struct { + ID ChannelID + inCh <-chan Envelope // inbound messages (peers to reactors) + outCh chan<- Envelope // outbound messages (reactors to peers) + errCh chan<- PeerError // peer error reporting + + messageType proto.Message // the channel's message type, used for unmarshaling +} + +// NewChannel creates a new channel. It is primarily for internal and test +// use, reactors should use Router.OpenChannel(). +func NewChannel( + id ChannelID, + messageType proto.Message, + inCh <-chan Envelope, + outCh chan<- Envelope, + errCh chan<- PeerError, +) *Channel { + return &Channel{ + ID: id, + messageType: messageType, + inCh: inCh, + outCh: outCh, + errCh: errCh, + } +} + +// Send blocks until the envelope has been sent, or until ctx ends. +// An error only occurs if the context ends before the send completes. +func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { + select { + case <-ctx.Done(): + return ctx.Err() + case ch.outCh <- envelope: + return nil + } +} + +// SendError blocks until the given error has been sent, or ctx ends. +// An error only occurs if the context ends before the send completes. +func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { + select { + case <-ctx.Done(): + return ctx.Err() + case ch.errCh <- pe: + return nil + } +} + +// Receive returns a new unbuffered iterator to receive messages from ch. +// The iterator runs until ctx ends. +func (ch *Channel) Receive(ctx context.Context) *ChannelIterator { + iter := &ChannelIterator{ + pipe: make(chan Envelope), // unbuffered + } + go func() { + defer close(iter.pipe) + iteratorWorker(ctx, ch, iter.pipe) + }() + return iter +} + +// ChannelIterator provides a context-aware path for callers +// (reactors) to process messages from the P2P layer without relying +// on the implementation details of the P2P layer. 
Channel provides +// access to it's Outbound stream as an iterator, and the +// MergedChannelIterator makes it possible to combine multiple +// channels into a single iterator. +type ChannelIterator struct { + pipe chan Envelope + current *Envelope +} + +func iteratorWorker(ctx context.Context, ch *Channel, pipe chan Envelope) { + for { + select { + case <-ctx.Done(): + return + case envelope := <-ch.inCh: + select { + case <-ctx.Done(): + return + case pipe <- envelope: + } + } + } +} + +// Next returns true when the Envelope value has advanced, and false +// when the context is canceled or iteration should stop. If an iterator has returned false, +// it will never return true again. +// in general, use Next, as in: +// +// for iter.Next(ctx) { +// envelope := iter.Envelope() +// // ... do things ... +// } +// +func (iter *ChannelIterator) Next(ctx context.Context) bool { + select { + case <-ctx.Done(): + iter.current = nil + return false + case envelope, ok := <-iter.pipe: + if !ok { + iter.current = nil + return false + } + + iter.current = &envelope + + return true + } +} + +// Envelope returns the current Envelope object held by the +// iterator. When the last call to Next returned true, Envelope will +// return a non-nil object. If Next returned false then Envelope is +// always nil. +func (iter *ChannelIterator) Envelope() *Envelope { return iter.current } + +// MergedChannelIterator produces an iterator that merges the +// messages from the given channels in arbitrary order. +// +// This allows the caller to consume messages from multiple channels +// without needing to manage the concurrency separately. 
+func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterator { + iter := &ChannelIterator{ + pipe: make(chan Envelope), // unbuffered + } + wg := new(sync.WaitGroup) + + for _, ch := range chs { + wg.Add(1) + go func(ch *Channel) { + defer wg.Done() + iteratorWorker(ctx, ch, iter.pipe) + }(ch) + } + + done := make(chan struct{}) + go func() { defer close(done); wg.Wait() }() + + go func() { + defer close(iter.pipe) + // we could return early if the context is canceled, + // but this is safer because it means the pipe stays + // open until all of the ch worker threads end, which + // should happen very quickly. + <-done + }() + + return iter +} diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go new file mode 100644 index 0000000000..e06e3e77ea --- /dev/null +++ b/internal/p2p/channel_test.go @@ -0,0 +1,221 @@ +package p2p + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/require" +) + +type channelInternal struct { + In chan Envelope + Out chan Envelope + Error chan PeerError +} + +func testChannel(size int) (*channelInternal, *Channel) { + in := &channelInternal{ + In: make(chan Envelope, size), + Out: make(chan Envelope, size), + Error: make(chan PeerError, size), + } + ch := &Channel{ + inCh: in.In, + outCh: in.Out, + errCh: in.Error, + } + return in, ch +} + +func TestChannel(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + testCases := []struct { + Name string + Case func(context.Context, *testing.T) + }{ + { + Name: "Send", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + require.NoError(t, ch.Send(ctx, Envelope{From: "kip", To: "merlin"})) + + res, ok := <-ins.Out + require.True(t, ok) + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + }, + }, + { + Name: "SendError", + Case: func(ctx 
context.Context, t *testing.T) { + ins, ch := testChannel(1) + require.NoError(t, ch.SendError(ctx, PeerError{NodeID: "kip", Err: errors.New("merlin")})) + + res, ok := <-ins.Error + require.True(t, ok) + require.EqualValues(t, "kip", res.NodeID) + require.EqualValues(t, "merlin", res.Err.Error()) + }, + }, + { + Name: "SendWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + cctx, ccancel := context.WithCancel(ctx) + ccancel() + require.Error(t, ch.Send(cctx, Envelope{From: "kip", To: "merlin"})) + }, + }, + { + Name: "SendErrorWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + cctx, ccancel := context.WithCancel(ctx) + ccancel() + + require.Error(t, ch.SendError(cctx, PeerError{NodeID: "kip", Err: errors.New("merlin")})) + }, + }, + { + Name: "ReceiveEmptyIteratorBlocks", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(1) + iter := ch.Receive(ctx) + require.NotNil(t, iter) + out := make(chan bool) + go func() { + defer close(out) + select { + case <-ctx.Done(): + case out <- iter.Next(ctx): + } + }() + select { + case <-time.After(10 * time.Millisecond): + case <-out: + require.Fail(t, "iterator should not advance") + } + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "ReceiveWithData", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + ins.In <- Envelope{From: "kip", To: "merlin"} + iter := ch.Receive(ctx) + require.NotNil(t, iter) + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + }, + }, + { + Name: "ReceiveWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + cctx, ccancel := context.WithCancel(ctx) + ccancel() + + iter := ch.Receive(cctx) + require.NotNil(t, iter) + require.False(t, iter.Next(cctx)) + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: 
"IteratorWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + + iter := ch.Receive(ctx) + require.NotNil(t, iter) + + cctx, ccancel := context.WithCancel(ctx) + ccancel() + require.False(t, iter.Next(cctx)) + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "IteratorCanceledAfterFirstUseBecomesNil", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + + ins.In <- Envelope{From: "kip", To: "merlin"} + iter := ch.Receive(ctx) + require.NotNil(t, iter) + + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + + cctx, ccancel := context.WithCancel(ctx) + ccancel() + + require.False(t, iter.Next(cctx)) + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "IteratorMultipleNextCalls", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + + ins.In <- Envelope{From: "kip", To: "merlin"} + iter := ch.Receive(ctx) + require.NotNil(t, iter) + + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + + res1 := iter.Envelope() + require.Equal(t, res, res1) + }, + }, + { + Name: "IteratorProducesNilObjectBeforeNext", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + + iter := ch.Receive(ctx) + require.NotNil(t, iter) + require.Nil(t, iter.Envelope()) + + ins.In <- Envelope{From: "kip", To: "merlin"} + require.NotNil(t, iter) + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.NotNil(t, res) + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(bctx) + defer cancel() + + tc.Case(ctx, t) + }) + } +} diff --git a/internal/p2p/conn/connection.go 
b/internal/p2p/conn/connection.go index a99e83dc51..ba9e9a2e3d 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -2,6 +2,7 @@ package conn import ( "bufio" + "context" "errors" "fmt" "io" @@ -9,14 +10,14 @@ import ( "net" "reflect" "runtime/debug" + "sync" "sync/atomic" "time" "github.com/gogo/protobuf/proto" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" + "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/libs/timer" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" @@ -48,8 +49,8 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID ChannelID, msgBytes []byte) -type errorCbFunc func(interface{}) +type receiveCbFunc func(ctx context.Context, chID ChannelID, msgBytes []byte) +type errorCbFunc func(context.Context, interface{}) /* Each peer has one `MConnection` (multiplex connection) instance. @@ -73,12 +74,13 @@ Inbound message bytes are handled with an onReceive callback function. */ type MConnection struct { service.BaseService + logger log.Logger conn net.Conn bufConnReader *bufio.Reader bufConnWriter *bufio.Writer - sendMonitor *flow.Monitor - recvMonitor *flow.Monitor + sendMonitor *flowrate.Monitor + recvMonitor *flowrate.Monitor send chan struct{} pong chan struct{} channels []*channel @@ -98,7 +100,9 @@ type MConnection struct { // used to ensure FlushStop and OnStop // are safe to call concurrently. - stopMtx tmsync.Mutex + stopMtx sync.Mutex + + cancel context.CancelFunc flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled. 
pingTimer *time.Ticker // send pings periodically @@ -146,12 +150,14 @@ func DefaultMConnConfig() MConnConfig { // NewMConnection wraps net.Conn and creates multiplex connection func NewMConnection( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, ) *MConnection { return NewMConnectionWithConfig( + logger, conn, chDescs, onReceive, @@ -161,6 +167,7 @@ func NewMConnection( // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config func NewMConnectionWithConfig( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, @@ -172,19 +179,23 @@ func NewMConnectionWithConfig( } mconn := &MConnection{ + logger: logger, conn: conn, bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), - sendMonitor: flow.New(0, 0), - recvMonitor: flow.New(0, 0), + sendMonitor: flowrate.New(0, 0), + recvMonitor: flowrate.New(0, 0), send: make(chan struct{}, 1), pong: make(chan struct{}, 1), onReceive: onReceive, onError: onError, config: config, created: time.Now(), + cancel: func() {}, } + mconn.BaseService = *service.NewBaseService(logger, "MConnection", mconn) + // Create channels var channelsIdx = map[ChannelID]*channel{} var channels = []*channel{} @@ -197,26 +208,14 @@ func NewMConnectionWithConfig( mconn.channels = channels mconn.channelsIdx = channelsIdx - mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn) - // maxPacketMsgSize() is a bit heavy, so call just once mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() return mconn } -func (c *MConnection) SetLogger(l log.Logger) { - c.BaseService.SetLogger(l) - for _, ch := range c.channels { - ch.SetLogger(l) - } -} - // OnStart implements BaseService -func (c *MConnection) OnStart() error { - if err := c.BaseService.OnStart(); err != nil { - return err - } +func (c *MConnection) OnStart(ctx context.Context) error { 
c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle) c.pingTimer = time.NewTicker(c.config.PingInterval) c.pongTimeoutCh = make(chan bool, 1) @@ -224,8 +223,8 @@ func (c *MConnection) OnStart() error { c.quitSendRoutine = make(chan struct{}) c.doneSendRoutine = make(chan struct{}) c.quitRecvRoutine = make(chan struct{}) - go c.sendRoutine() - go c.recvRoutine() + go c.sendRoutine(ctx) + go c.recvRoutine(ctx) return nil } @@ -250,7 +249,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) { default: } - c.BaseService.OnStop() c.flushTimer.Stop() c.pingTimer.Stop() c.chStatsTimer.Stop() @@ -280,28 +278,29 @@ func (c *MConnection) String() string { } func (c *MConnection) flush() { - c.Logger.Debug("Flush", "conn", c) + c.logger.Debug("Flush", "conn", c) err := c.bufConnWriter.Flush() if err != nil { - c.Logger.Debug("MConnection flush failed", "err", err) + c.logger.Debug("MConnection flush failed", "err", err) } } // Catch panics, usually caused by remote disconnects. 
-func (c *MConnection) _recover() { +func (c *MConnection) _recover(ctx context.Context) { if r := recover(); r != nil { - c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) - c.stopForError(fmt.Errorf("recovered from panic: %v", r)) + c.logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) + c.stopForError(ctx, fmt.Errorf("recovered from panic: %v", r)) } } -func (c *MConnection) stopForError(r interface{}) { +func (c *MConnection) stopForError(ctx context.Context, r interface{}) { if err := c.Stop(); err != nil { - c.Logger.Error("Error stopping connection", "err", err) + c.logger.Error("error stopping connection", "err", err) } + if atomic.CompareAndSwapUint32(&c.errored, 0, 1) { if c.onError != nil { - c.onError(r) + c.onError(ctx, r) } } } @@ -312,12 +311,12 @@ func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { return false } - c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", msgBytes) + c.logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", msgBytes) // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) + c.logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) return false } @@ -329,14 +328,14 @@ func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { default: } } else { - c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", msgBytes) + c.logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", msgBytes) } return success } // sendRoutine polls for packets to send from channels. 
-func (c *MConnection) sendRoutine() { - defer c._recover() +func (c *MConnection) sendRoutine(ctx context.Context) { + defer c._recover(ctx) protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter) FOR_LOOP: @@ -354,14 +353,13 @@ FOR_LOOP: channel.updateStats() } case <-c.pingTimer.C: - c.Logger.Debug("Send Ping") _n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{})) if err != nil { - c.Logger.Error("Failed to send PacketPing", "err", err) + c.logger.Error("Failed to send PacketPing", "err", err) break SELECTION } c.sendMonitor.Update(_n) - c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) + c.logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() { select { case c.pongTimeoutCh <- true: @@ -371,25 +369,25 @@ FOR_LOOP: c.flush() case timeout := <-c.pongTimeoutCh: if timeout { - c.Logger.Debug("Pong timeout") err = errors.New("pong timeout") } else { c.stopPongTimer() } case <-c.pong: - c.Logger.Debug("Send Pong") _n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) if err != nil { - c.Logger.Error("Failed to send PacketPong", "err", err) + c.logger.Error("Failed to send PacketPong", "err", err) break SELECTION } c.sendMonitor.Update(_n) c.flush() + case <-ctx.Done(): + break FOR_LOOP case <-c.quitSendRoutine: break FOR_LOOP case <-c.send: // Send some PacketMsgs - eof := c.sendSomePacketMsgs() + eof := c.sendSomePacketMsgs(ctx) if !eof { // Keep sendRoutine awake. select { @@ -403,8 +401,8 @@ FOR_LOOP: break FOR_LOOP } if err != nil { - c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) break FOR_LOOP } } @@ -416,7 +414,7 @@ FOR_LOOP: // Returns true if messages from channels were exhausted. // Blocks in accordance to .sendMonitor throttling. 
-func (c *MConnection) sendSomePacketMsgs() bool { +func (c *MConnection) sendSomePacketMsgs(ctx context.Context) bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. @@ -424,7 +422,7 @@ func (c *MConnection) sendSomePacketMsgs() bool { // Now send some PacketMsgs. for i := 0; i < numBatchPacketMsgs; i++ { - if c.sendPacketMsg() { + if c.sendPacketMsg(ctx) { return true } } @@ -432,7 +430,7 @@ func (c *MConnection) sendSomePacketMsgs() bool { } // Returns true if messages from channels were exhausted. -func (c *MConnection) sendPacketMsg() bool { +func (c *MConnection) sendPacketMsg(ctx context.Context) bool { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. var leastRatio float32 = math.MaxFloat32 @@ -454,13 +452,13 @@ func (c *MConnection) sendPacketMsg() bool { if leastChannel == nil { return true } - // c.Logger.Info("Found a msgPacket to send") + // c.logger.Info("Found a msgPacket to send") // Make & send a PacketMsg from this channel _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) if err != nil { - c.Logger.Error("Failed to write PacketMsg", "err", err) - c.stopForError(err) + c.logger.Error("Failed to write PacketMsg", "err", err) + c.stopForError(ctx, err) return true } c.sendMonitor.Update(_n) @@ -472,8 +470,8 @@ func (c *MConnection) sendPacketMsg() bool { // After a whole message has been assembled, it's pushed to onReceive(). // Blocks depending on how the connection is throttled. // Otherwise, it never blocks. 
-func (c *MConnection) recvRoutine() { - defer c._recover() +func (c *MConnection) recvRoutine(ctx context.Context) { + defer c._recover(ctx) protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize) @@ -489,10 +487,10 @@ FOR_LOOP: if err == nil { // return } else { - c.Logger.Debug("Error peeking connection buffer", "err", err) + c.logger.Debug("error peeking connection buffer", "err", err) // return nil } - c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz) + c.logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz) } */ @@ -505,6 +503,7 @@ FOR_LOOP: // stopServices was invoked and we are shutting down // receiving is excpected to fail since we will close the connection select { + case <-ctx.Done(): case <-c.quitRecvRoutine: break FOR_LOOP default: @@ -512,11 +511,11 @@ FOR_LOOP: if c.IsRunning() { if err == io.EOF { - c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c) + c.logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c) } else { - c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) + c.logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) } - c.stopForError(err) + c.stopForError(ctx, err) } break FOR_LOOP } @@ -526,14 +525,12 @@ FOR_LOOP: case *tmp2p.Packet_PacketPing: // TODO: prevent abuse, as they cause flush()'s. 
// https://github.com/tendermint/tendermint/issues/1190 - c.Logger.Debug("Receive Ping") select { case c.pong <- struct{}{}: default: // never block } case *tmp2p.Packet_PacketPong: - c.Logger.Debug("Receive Pong") select { case c.pongTimeoutCh <- false: default: @@ -544,28 +541,28 @@ FOR_LOOP: channel, ok := c.channelsIdx[channelID] if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil { err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID) - c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) break FOR_LOOP } msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg) if err != nil { if c.IsRunning() { - c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) } break FOR_LOOP } if msgBytes != nil { - c.Logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes) + c.logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes) // NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine - c.onReceive(channelID, msgBytes) + c.onReceive(ctx, channelID, msgBytes) } default: err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet)) - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) break FOR_LOOP } } @@ -656,7 +653,7 @@ type channel struct { maxPacketMsgPayloadSize int - Logger log.Logger + logger log.Logger } func newChannel(conn *MConnection, desc ChannelDescriptor) *channel { @@ -670,13 +667,10 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *channel { sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: 
make([]byte, 0, desc.RecvBufferCapacity), maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, + logger: conn.logger, } } -func (ch *channel) SetLogger(l log.Logger) { - ch.Logger = l -} - // Queues message to send to this channel. // Goroutine-safe // Times out (and returns false) after defaultSendTimeout @@ -733,7 +727,7 @@ func (ch *channel) writePacketMsgTo(w io.Writer) (n int, err error) { // complete, which is owned by the caller and will not be modified. // Not goroutine-safe func (ch *channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { - ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) + ch.logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data) if recvCap < recvReceived { return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived) diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 1ed179ad86..dbbfe23bd8 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -1,8 +1,10 @@ package conn import ( + "context" "encoding/hex" "net" + "sync" "testing" "time" @@ -13,33 +15,34 @@ import ( "github.com/tendermint/tendermint/internal/libs/protoio" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/proto/tendermint/types" ) const maxPingPongPacketSize = 1024 // bytes -func createTestMConnection(conn net.Conn) *MConnection { - onReceive := func(chID ChannelID, msgBytes []byte) { - } - onError := func(r interface{}) { - } - c := createMConnectionWithCallbacks(conn, onReceive, onError) - c.SetLogger(log.TestingLogger()) - return c +func createTestMConnection(logger log.Logger, conn net.Conn) *MConnection { + return createMConnectionWithCallbacks(logger, conn, + // 
onReceive + func(ctx context.Context, chID ChannelID, msgBytes []byte) { + }, + // onError + func(ctx context.Context, r interface{}) { + }) } func createMConnectionWithCallbacks( + logger log.Logger, conn net.Conn, - onReceive func(chID ChannelID, msgBytes []byte), - onError func(r interface{}), + onReceive func(ctx context.Context, chID ChannelID, msgBytes []byte), + onError func(ctx context.Context, r interface{}), ) *MConnection { cfg := DefaultMConnConfig() cfg.PingInterval = 90 * time.Millisecond cfg.PongTimeout = 45 * time.Millisecond chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}} - c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg) - c.SetLogger(log.TestingLogger()) + c := NewMConnectionWithConfig(logger, conn, chDescs, onReceive, onError, cfg) + return c } @@ -47,10 +50,13 @@ func TestMConnectionSendFlushStop(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) - clientConn := createTestMConnection(client) - err := clientConn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, clientConn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientConn := createTestMConnection(log.TestingLogger(), client) + err := clientConn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(clientConn)) msg := []byte("abc") assert.True(t, clientConn.Send(0x01, msg)) @@ -81,10 +87,13 @@ func TestMConnectionSend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createTestMConnection(log.TestingLogger(), client) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) msg := []byte("Ant-Man") assert.True(t, mconn.Send(0x01, msg)) @@ -111,21 +120,32 @@ func TestMConnectionReceive(t *testing.T) { receivedCh := 
make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn1.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn1)) + logger := log.TestingLogger() - mconn2 := createTestMConnection(server) - err = mconn2.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn2)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn1 := createMConnectionWithCallbacks(logger, client, onReceive, onError) + err := mconn1.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn1)) + + mconn2 := createTestMConnection(logger, server) + err = mconn2.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn2)) msg := []byte("Cyclops") assert.True(t, mconn2.Send(0x01, msg)) @@ -146,16 +166,26 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := 
createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) serverGotPing := make(chan struct{}) go func() { @@ -184,16 +214,26 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) // sending 3 pongs in a row (abuse) protoWriter := protoio.NewDelimitedWriter(server) @@ -238,16 +278,25 @@ func TestMConnectionMultiplePings(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn 
:= createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 @@ -285,16 +334,26 @@ func TestMConnectionPingPongs(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) serverGotPing := make(chan struct{}) go func() { @@ -342,16 +401,25 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID ChannelID, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn 
:= createMConnectionWithCallbacks(log.TestingLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) if err := client.Close(); err != nil { t.Error(err) @@ -368,32 +436,40 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { } } -func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { +func newClientAndServerConnsForReadErrors( + ctx context.Context, + t *testing.T, + chOnErr chan struct{}, +) (*MConnection, *MConnection) { server, client := NetPipe() - onReceive := func(chID ChannelID, msgBytes []byte) {} - onError := func(r interface{}) {} + onReceive := func(context.Context, ChannelID, []byte) {} + onError := func(context.Context, interface{}) {} // create client conn with two channels chDescs := []*ChannelDescriptor{ {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, } - mconnClient := NewMConnection(client, chDescs, onReceive, onError) - mconnClient.SetLogger(log.TestingLogger().With("module", "client")) - err := mconnClient.Start() - require.Nil(t, err) + logger := log.TestingLogger() + + mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError) + err := mconnClient.Start(ctx) + require.NoError(t, err) // create server conn with 1 channel // it fires on chOnErr when there's an error - serverLogger := log.TestingLogger().With("module", "server") - onError = func(r interface{}) { - chOnErr <- struct{}{} - } - mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) - mconnServer.SetLogger(serverLogger) - err = mconnServer.Start() - require.Nil(t, err) + serverLogger := logger.With("module", "server") + onError = func(ctx context.Context, r interface{}) { + select { + case <-ctx.Done(): + case chOnErr <- struct{}{}: + } + } + + mconnServer := createMConnectionWithCallbacks(serverLogger, server, onReceive, onError) + err = 
mconnServer.Start(ctx) + require.NoError(t, err) return mconnClient, mconnServer } @@ -408,8 +484,11 @@ func expectSend(ch chan struct{}) bool { } func TestMConnectionReadErrorBadEncoding(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) client := mconnClient.conn @@ -417,12 +496,15 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { _, err := client.Write([]byte{1, 2, 3, 4, 5}) require.NoError(t, err) assert.True(t, expectSend(chOnErr), "badly encoded msgPacket") - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + t.Cleanup(waitAll(mconnClient, mconnServer)) } func TestMConnectionReadErrorUnknownChannel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) msg := []byte("Ant-Man") @@ -433,18 +515,24 @@ func TestMConnectionReadErrorUnknownChannel(t *testing.T) { // should cause an error assert.True(t, mconnClient.Send(0x02, msg)) assert.True(t, expectSend(chOnErr), "unknown channel") - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + t.Cleanup(waitAll(mconnClient, mconnServer)) } func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { - chOnRcv <- struct{}{} + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) + 
t.Cleanup(waitAll(mconnClient, mconnServer)) + + mconnServer.onReceive = func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case <-ctx.Done(): + case chOnRcv <- struct{}{}: + } } client := mconnClient.conn @@ -474,9 +562,12 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { } func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) + t.Cleanup(waitAll(mconnClient, mconnServer)) // send msg with unknown msg type _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"}) @@ -487,11 +578,13 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { func TestMConnectionTrySend(t *testing.T) { server, client := NetPipe() t.Cleanup(closeAll(t, client, server)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + mconn := createTestMConnection(log.TestingLogger(), client) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) @@ -507,7 +600,6 @@ func TestMConnectionTrySend(t *testing.T) { assert.Equal(t, "TrySend", <-resultCh) } -// nolint:lll //ignore line length for tests func TestConnVectors(t *testing.T) { testCases := []struct { @@ -535,11 +627,17 @@ func TestMConnectionChannelOverflow(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + ctx, cancel := context.WithCancel(context.Background()) + defer 
cancel() + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) + t.Cleanup(waitAll(mconnClient, mconnServer)) - mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) { - chOnRcv <- struct{}{} + mconnServer.onReceive = func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case <-ctx.Done(): + case chOnRcv <- struct{}{}: + } } client := mconnClient.conn @@ -561,16 +659,26 @@ func TestMConnectionChannelOverflow(t *testing.T) { } -type stopper interface { - Stop() error -} - -func stopAll(t *testing.T, stoppers ...stopper) func() { +func waitAll(waiters ...service.Service) func() { return func() { - for _, s := range stoppers { - if err := s.Stop(); err != nil { - t.Log(err) + switch len(waiters) { + case 0: + return + case 1: + waiters[0].Wait() + return + default: + wg := &sync.WaitGroup{} + + for _, w := range waiters { + wg.Add(1) + go func(s service.Service) { + defer wg.Done() + s.Wait() + }(w) } + + wg.Wait() } } } diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index 35fac488a1..ad51237e49 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -11,6 +11,7 @@ import ( "io" "math" "net" + "sync" "time" gogotypes "github.com/gogo/protobuf/types" @@ -24,9 +25,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/libs/async" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/async" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -76,11 +76,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. 
- recvMtx tmsync.Mutex + recvMtx sync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx tmsync.Mutex + sendMtx sync.Mutex sendNonce *[aeadNonceSize]byte } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 84384011b4..362c8102f0 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" mrand "math/rand" "os" @@ -22,7 +21,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/sr25519" - "github.com/tendermint/tendermint/libs/async" + "github.com/tendermint/tendermint/internal/libs/async" tmrand "github.com/tendermint/tendermint/libs/rand" ) @@ -53,6 +52,7 @@ func (pk privKeyWithNilPubKey) Sign(msg []byte) ([]byte, error) { return pk.orig func (pk privKeyWithNilPubKey) PubKey() crypto.PubKey { return nil } func (pk privKeyWithNilPubKey) Equals(pk2 crypto.PrivKey) bool { return pk.orig.Equals(pk2) } func (pk privKeyWithNilPubKey) Type() string { return "privKeyWithNilPubKey" } +func (privKeyWithNilPubKey) TypeTag() string { return "test/privKeyWithNilPubKey" } func TestSecretConnectionHandshake(t *testing.T) { fooSecConn, barSecConn := makeSecretConnPair(t) @@ -126,7 +126,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { nodePrvKey := ed25519.GenPrivKey() nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) if err != nil { - t.Errorf("failed to establish SecretConnection for node: %v", err) + t.Errorf("failed to establish SecretConnection for node: %v", err) return nil, true, err } // In parallel, handle some reads and writes. 
@@ -136,7 +136,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { for _, nodeWrite := range nodeWrites { n, err := nodeSecretConn.Write([]byte(nodeWrite)) if err != nil { - t.Errorf("failed to write to nodeSecretConn: %v", err) + t.Errorf("failed to write to nodeSecretConn: %v", err) return nil, true, err } if n != len(nodeWrite) { @@ -163,7 +163,7 @@ } return nil, false, nil } else if err != nil { - t.Errorf("failed to read from nodeSecretConn: %v", err) + t.Errorf("failed to read from nodeSecretConn: %v", err) return nil, true, err } *nodeReads = append(*nodeReads, string(readBuffer[:n])) @@ -229,7 +229,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - require.NoError(t, ioutil.WriteFile(goldenFilepath, []byte(data), 0644)) + require.NoError(t, os.WriteFile(goldenFilepath, []byte(data), 0644)) } f, err := os.Open(goldenFilepath) if err != nil { @@ -241,15 +241,15 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { line := scanner.Text() params := strings.Split(line, ",") randSecretVector, err := hex.DecodeString(params[0]) - require.Nil(t, err) + require.NoError(t, err) randSecret := new([32]byte) copy((*randSecret)[:], randSecretVector) locIsLeast, err := strconv.ParseBool(params[1]) - require.Nil(t, err) + require.NoError(t, err) expectedRecvSecret, err := hex.DecodeString(params[2]) - require.Nil(t, err) + require.NoError(t, err) expectedSendSecret, err := hex.DecodeString(params[3]) - require.Nil(t, err) + require.NoError(t, err) recvSecret, sendSecret := deriveSecrets(randSecret, locIsLeast) require.Equal(t, expectedRecvSecret, (*recvSecret)[:], "Recv Secrets aren't equal") @@ -288,7 +288,7 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n i for i := 0; i < n; i++ { _, err := conn.Write([]byte(txt)) if err != nil { - t.Errorf("failed to write 
to fooSecConn: %v", err) + t.Errorf("failed to write to fooSecConn: %v", err) return } } @@ -343,7 +343,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection func(_ int) (val interface{}, abort bool, err error) { fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) if err != nil { - tb.Errorf("failed to establish SecretConnection for foo: %v", err) + tb.Errorf("failed to establish SecretConnection for foo: %v", err) return nil, true, err } remotePubBytes := fooSecConn.RemotePubKey() @@ -358,7 +358,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection func(_ int) (val interface{}, abort bool, err error) { barSecConn, err = MakeSecretConnection(barConn, barPrvKey) if barSecConn == nil { - tb.Errorf("failed to establish SecretConnection for bar: %v", err) + tb.Errorf("failed to establish SecretConnection for bar: %v", err) return nil, true, err } remotePubBytes := barSecConn.RemotePubKey() @@ -405,7 +405,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) { if err == io.EOF { return } else if err != nil { - b.Errorf("failed to read from barSecConn: %v", err) + b.Errorf("failed to read from barSecConn: %v", err) return } } @@ -416,7 +416,7 @@ idx := mrand.Intn(len(fooWriteBytes)) _, err := fooSecConn.Write(fooWriteBytes[idx]) if err != nil { - b.Errorf("failed to write to fooSecConn: %v", err) + b.Errorf("failed to write to fooSecConn: %v", err) return } } diff --git a/internal/p2p/metrics.go b/internal/p2p/metrics.go index e3481058b2..2780d221ef 100644 --- a/internal/p2p/metrics.go +++ b/internal/p2p/metrics.go @@ -1,6 +1,11 @@ package p2p import ( + "fmt" + "reflect" + "regexp" + "sync" + "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" "github.com/go-kit/kit/metrics/prometheus" @@ -13,6 +18,13 @@ const ( MetricsSubsystem = "p2p" ) +var ( + // valueToLabelRegexp is used to find the golang package name and type name + // 
so that the name can be turned into a prometheus label where the characters + // in the label do not include prometheus special characters such as '*' and '.'. + valueToLabelRegexp = regexp.MustCompile(`\*?(\w+)\.(.*)`) +) + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. @@ -43,13 +55,14 @@ type Metrics struct { // PeerQueueMsgSize defines the average size of messages sent over a peer's // queue for a specific flow (i.e. Channel). PeerQueueMsgSize metrics.Gauge + + mtx *sync.RWMutex + messageLabelNames map[reflect.Type]string } // PrometheusMetrics returns Metrics build using Prometheus client library. // Optionally, labels can be provided along with their values ("foo", // "fooValue"). -// -// nolint: lll func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels := []string{} for i := 0; i < len(labelsAndValues); i += 2 { @@ -68,14 +81,14 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "peer_receive_bytes_total", Help: "Number of bytes received from a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "peer_send_bytes_total", Help: "Number of bytes sent to a given peer.", - }, append(labels, "peer_id", "chID")).With(labelsAndValues...), + }, append(labels, "peer_id", "chID", "message_type")).With(labelsAndValues...), PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, @@ -118,6 +131,9 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "router_channel_queue_msg_size", Help: "The size of messages sent over a peer's queue for a specific p2p Channel.", }, append(labels, "ch_id")).With(labelsAndValues...), + + 
mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, } } @@ -133,5 +149,30 @@ func NopMetrics() *Metrics { RouterChannelQueueSend: discard.NewHistogram(), PeerQueueDroppedMsgs: discard.NewCounter(), PeerQueueMsgSize: discard.NewGauge(), + mtx: &sync.RWMutex{}, + messageLabelNames: map[reflect.Type]string{}, + } +} + +// ValueToMetricLabel is a method that is used to produce a prometheus label value of the golang +// type that is passed in. +// This method uses a map on the Metrics struct so that each label name only needs +// to be produced once to prevent expensive string operations. +func (m *Metrics) ValueToMetricLabel(i interface{}) string { + t := reflect.TypeOf(i) + m.mtx.RLock() + + if s, ok := m.messageLabelNames[t]; ok { + m.mtx.RUnlock() + return s } + m.mtx.RUnlock() + + s := t.String() + ss := valueToLabelRegexp.FindStringSubmatch(s) + l := fmt.Sprintf("%s_%s", ss[1], ss[2]) + m.mtx.Lock() + defer m.mtx.Unlock() + m.messageLabelNames[t] = l + return l } diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go new file mode 100644 index 0000000000..53b3c47bd8 --- /dev/null +++ b/internal/p2p/metrics_test.go @@ -0,0 +1,19 @@ +package p2p + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/proto/tendermint/p2p" +) + +func TestValueToMetricsLabel(t *testing.T) { + m := NopMetrics() + r := &p2p.PexResponse{} + str := m.ValueToMetricLabel(r) + assert.Equal(t, "p2p_PexResponse", str) + + // subsequent calls to the function should produce the same result + str = m.ValueToMetricLabel(r) + assert.Equal(t, "p2p_PexResponse", str) +} diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 65b9afafb6..576fb23863 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -79,20 +79,20 @@ func (_m *Connection) LocalEndpoint() p2p.Endpoint { return r0 } -// ReceiveMessage provides a mock function with given fields: -func (_m 
*Connection) ReceiveMessage() (conn.ChannelID, []byte, error) { - ret := _m.Called() +// ReceiveMessage provides a mock function with given fields: _a0 +func (_m *Connection) ReceiveMessage(_a0 context.Context) (conn.ChannelID, []byte, error) { + ret := _m.Called(_a0) var r0 conn.ChannelID - if rf, ok := ret.Get(0).(func() conn.ChannelID); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) conn.ChannelID); ok { + r0 = rf(_a0) } else { r0 = ret.Get(0).(conn.ChannelID) } var r1 []byte - if rf, ok := ret.Get(1).(func() []byte); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) []byte); ok { + r1 = rf(_a0) } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]byte) @@ -100,8 +100,8 @@ func (_m *Connection) ReceiveMessage() (conn.ChannelID, []byte, error) { } var r2 error - if rf, ok := ret.Get(2).(func() error); ok { - r2 = rf() + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) } else { r2 = ret.Error(2) } @@ -123,13 +123,13 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { return r0 } -// SendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) SendMessage(_a0 conn.ChannelID, _a1 []byte) error { - ret := _m.Called(_a0, _a1) +// SendMessage provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Connection) SendMessage(_a0 context.Context, _a1 conn.ChannelID, _a2 []byte) error { + ret := _m.Called(_a0, _a1, _a2) var r0 error - if rf, ok := ret.Get(0).(func(conn.ChannelID, []byte) error); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, conn.ChannelID, []byte) error); ok { + r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Error(0) } diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 2fc7baa29e..b172901182 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -17,13 +17,13 @@ type Transport struct { mock.Mock } -// Accept provides a mock function with given fields: -func (_m 
*Transport) Accept() (p2p.Connection, error) { - ret := _m.Called() +// Accept provides a mock function with given fields: _a0 +func (_m *Transport) Accept(_a0 context.Context) (p2p.Connection, error) { + ret := _m.Called(_a0) var r0 p2p.Connection - if rf, ok := ret.Get(0).(func() p2p.Connection); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) p2p.Connection); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(p2p.Connection) @@ -31,8 +31,8 @@ func (_m *Transport) Accept() (p2p.Connection, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -98,6 +98,20 @@ func (_m *Transport) Endpoints() []p2p.Endpoint { return r0 } +// Listen provides a mock function with given fields: _a0 +func (_m *Transport) Listen(_a0 p2p.Endpoint) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(p2p.Endpoint) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Protocols provides a mock function with given fields: func (_m *Transport) Protocols() []p2p.Protocol { ret := _m.Called() diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 642114a1d8..d8657b774c 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -1,8 +1,6 @@ package p2p_test import ( - "context" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" @@ -13,7 +11,6 @@ import ( // Common setup for P2P tests. 
var ( - ctx = context.Background() chID = p2p.ChannelID(1) chDesc = &p2p.ChannelDescriptor{ ID: chID, diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index c808ad3e05..3117472bef 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -24,6 +24,7 @@ type Network struct { logger log.Logger memoryNetwork *p2p.MemoryNetwork + cancel context.CancelFunc } // NetworkOptions is an argument structure to parameterize the @@ -47,7 +48,7 @@ func (opts *NetworkOptions) setDefaults() { // MakeNetwork creates a test network with the given number of nodes and // connects them to each other. -func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { +func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network { opts.setDefaults() logger := log.TestingLogger() network := &Network{ @@ -57,7 +58,7 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { } for i := 0; i < opts.NumNodes; i++ { - node := network.MakeNode(t, opts.NodeOpts) + node := network.MakeNode(ctx, t, opts.NodeOpts) network.Nodes[node.NodeID] = node } @@ -67,15 +68,19 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { // Start starts the network by setting up a list of node addresses to dial in // addition to creating a peer update subscription for each node. Finally, all // nodes are connected to each other. -func (n *Network) Start(t *testing.T) { +func (n *Network) Start(ctx context.Context, t *testing.T) { + ctx, n.cancel = context.WithCancel(ctx) + t.Cleanup(n.cancel) + // Set up a list of node addresses to dial, and a peer update subscription // for each node. 
dialQueue := []p2p.NodeAddress{} subs := map[types.NodeID]*p2p.PeerUpdates{} + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, node := range n.Nodes { dialQueue = append(dialQueue, node.NodeAddress) - subs[node.NodeID] = node.PeerManager.Subscribe() - defer subs[node.NodeID].Close() + subs[node.NodeID] = node.PeerManager.Subscribe(subctx) } // For each node, dial the nodes that it still doesn't have a connection to @@ -93,6 +98,8 @@ func (n *Network) Start(t *testing.T) { require.True(t, added) select { + case <-ctx.Done(): + require.Fail(t, "operation canceled") case peerUpdate := <-sourceSub.Updates(): require.Equal(t, p2p.PeerUpdate{ NodeID: targetNode.NodeID, @@ -104,6 +111,8 @@ func (n *Network) Start(t *testing.T) { } select { + case <-ctx.Done(): + require.Fail(t, "operation canceled") case peerUpdate := <-targetSub.Updates(): require.Equal(t, p2p.PeerUpdate{ NodeID: sourceNode.NodeID, @@ -135,12 +144,13 @@ func (n *Network) NodeIDs() []types.NodeID { // MakeChannels makes a channel on all nodes and returns them, automatically // doing error checks and cleanups. func (n *Network) MakeChannels( + ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, ) map[types.NodeID]*p2p.Channel { channels := map[types.NodeID]*p2p.Channel{} for _, node := range n.Nodes { - channels[node.NodeID] = node.MakeChannel(t, chDesc) + channels[node.NodeID] = node.MakeChannel(ctx, t, chDesc) } return channels } @@ -149,12 +159,13 @@ func (n *Network) MakeChannels( // automatically doing error checks. The caller must ensure proper cleanup of // all the channels. 
func (n *Network) MakeChannelsNoCleanup( + ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, ) map[types.NodeID]*p2p.Channel { channels := map[types.NodeID]*p2p.Channel{} for _, node := range n.Nodes { - channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc) + channels[node.NodeID] = node.MakeChannelNoCleanup(ctx, t, chDesc) } return channels } @@ -181,23 +192,24 @@ func (n *Network) Peers(id types.NodeID) []*Node { // Remove removes a node from the network, stopping it and waiting for all other // nodes to pick up the disconnection. -func (n *Network) Remove(t *testing.T, id types.NodeID) { +func (n *Network) Remove(ctx context.Context, t *testing.T, id types.NodeID) { require.Contains(t, n.Nodes, id) node := n.Nodes[id] delete(n.Nodes, id) subs := []*p2p.PeerUpdates{} + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, peer := range n.Nodes { - sub := peer.PeerManager.Subscribe() - defer sub.Close() + sub := peer.PeerManager.Subscribe(subctx) subs = append(subs, sub) } require.NoError(t, node.Transport.Close()) + node.cancel() if node.Router.IsRunning() { require.NoError(t, node.Router.Stop()) } - node.PeerManager.Close() for _, sub := range subs { RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -216,12 +228,16 @@ type Node struct { Router *p2p.Router PeerManager *p2p.PeerManager Transport *p2p.MemoryTransport + + cancel context.CancelFunc } // MakeNode creates a new Node configured for the network with a // running peer manager, but does not add it to the existing // network. Callers are responsible for updating peering relationships. 
-func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { +func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) *Node { + ctx, cancel := context.WithCancel(ctx) + privKey := ed25519.GenPrivKey() nodeID := types.NodeIDFromPubKey(privKey.PubKey()) nodeInfo := types.NodeInfo{ @@ -243,23 +259,26 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { require.NoError(t, err) router, err := p2p.NewRouter( + ctx, n.logger, p2p.NopMetrics(), nodeInfo, privKey, peerManager, []p2p.Transport{transport}, + transport.Endpoints(), p2p.RouterOptions{DialSleep: func(_ context.Context) {}}, ) + require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) t.Cleanup(func() { if router.IsRunning() { require.NoError(t, router.Stop()) } - peerManager.Close() require.NoError(t, transport.Close()) + cancel() }) return &Node{ @@ -270,6 +289,7 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { Router: router, PeerManager: peerManager, Transport: transport, + cancel: cancel, } } @@ -277,15 +297,17 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { // test cleanup, it also checks that the channel is empty, to make sure // all expected messages have been asserted. func (n *Node) MakeChannel( + ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, ) *p2p.Channel { - channel, err := n.Router.OpenChannel(chDesc) + ctx, cancel := context.WithCancel(ctx) + channel, err := n.Router.OpenChannel(ctx, chDesc) require.NoError(t, err) require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID)) t.Cleanup(func() { - RequireEmpty(t, channel) - channel.Close() + RequireEmpty(ctx, t, channel) + cancel() }) return channel } @@ -293,23 +315,22 @@ func (n *Node) MakeChannel( // MakeChannelNoCleanup opens a channel, with automatic error handling. The // caller must ensure proper cleanup of the channel. 
func (n *Node) MakeChannelNoCleanup( + ctx context.Context, t *testing.T, chDesc *p2p.ChannelDescriptor, ) *p2p.Channel { - channel, err := n.Router.OpenChannel(chDesc) + channel, err := n.Router.OpenChannel(ctx, chDesc) require.NoError(t, err) return channel } // MakePeerUpdates opens a peer update subscription, with automatic cleanup. // It checks that all updates have been consumed during cleanup. -func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates { +func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdates { t.Helper() - sub := n.PeerManager.Subscribe() + sub := n.PeerManager.Subscribe(ctx) t.Cleanup(func() { - t.Helper() - RequireNoUpdates(t, sub) - sub.Close() + RequireNoUpdates(ctx, t, sub) }) return sub @@ -318,13 +339,8 @@ func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates { // MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup. // It does *not* check that all updates have been consumed, but will // close the update channel. -func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates { - sub := n.PeerManager.Subscribe() - t.Cleanup(func() { - sub.Close() - }) - - return sub +func (n *Node) MakePeerUpdatesNoRequireEmpty(ctx context.Context, t *testing.T) *p2p.PeerUpdates { + return n.PeerManager.Subscribe(ctx) } func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor { diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index a9fc16a34c..f492ff09e6 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -1,10 +1,13 @@ package p2ptest import ( + "context" + "errors" "testing" "time" "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/p2p" @@ -12,106 +15,117 @@ import ( ) // RequireEmpty requires that the given channel is empty. 
-func RequireEmpty(t *testing.T, channels ...*p2p.Channel) { - for _, channel := range channels { - select { - case e := <-channel.In: - require.Fail(t, "unexpected message", "channel %v should be empty, got %v", channel.ID, e) - case <-time.After(10 * time.Millisecond): - } +func RequireEmpty(ctx context.Context, t *testing.T, channels ...*p2p.Channel) { + t.Helper() + + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + + iter := p2p.MergedChannelIterator(ctx, channels...) + count := 0 + for iter.Next(ctx) { + count++ + require.Nil(t, iter.Envelope()) } + require.Zero(t, count) + require.Error(t, ctx.Err()) } // RequireReceive requires that the given envelope is received on the channel. -func RequireReceive(t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { +func RequireReceive(ctx context.Context, t *testing.T, channel *p2p.Channel, expect p2p.Envelope) { t.Helper() - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - - select { - case e, ok := <-channel.In: - require.True(t, ok, "channel %v is closed", channel.ID) - require.Equal(t, expect, e) + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() - case <-channel.Done(): - require.Fail(t, "channel %v is closed", channel.ID) + iter := channel.Receive(ctx) + count := 0 + for iter.Next(ctx) { + count++ + envelope := iter.Envelope() + require.Equal(t, expect.From, envelope.From) + require.Equal(t, expect.Message, envelope.Message) + } - case <-timer.C: - require.Fail(t, "timed out waiting for message", "%v on channel %v", expect, channel.ID) + if !assert.True(t, count >= 1) { + require.NoError(t, ctx.Err(), "timed out waiting for message %v", expect) } } // RequireReceiveUnordered requires that the given envelopes are all received on // the channel, ignoring order. 
-func RequireReceiveUnordered(t *testing.T, channel *p2p.Channel, expect []p2p.Envelope) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() +func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Channel, expect []*p2p.Envelope) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() - actual := []p2p.Envelope{} - for { - select { - case e, ok := <-channel.In: - require.True(t, ok, "channel %v is closed", channel.ID) - actual = append(actual, e) - if len(actual) == len(expect) { - require.ElementsMatch(t, expect, actual) - return - } + actual := []*p2p.Envelope{} - case <-channel.Done(): - require.Fail(t, "channel %v is closed", channel.ID) - - case <-timer.C: - require.ElementsMatch(t, expect, actual) + iter := channel.Receive(ctx) + for iter.Next(ctx) { + actual = append(actual, iter.Envelope()) + if len(actual) == len(expect) { + require.ElementsMatch(t, expect, actual, "len=%d", len(actual)) return } } + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + require.ElementsMatch(t, expect, actual) + } } // RequireSend requires that the given envelope is sent on the channel. 
-func RequireSend(t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - select { - case channel.Out <- envelope: - case <-timer.C: - require.Fail(t, "timed out sending message", "%v on channel %v", envelope, channel.ID) +func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { + tctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + err := channel.Send(tctx, envelope) + switch { + case errors.Is(err, context.DeadlineExceeded): + require.Fail(t, "timed out sending message to %q", envelope.To) + default: + require.NoError(t, err, "unexpected error") } } // RequireSendReceive requires that a given Protobuf message is sent to the // given peer, and then that the given response is received back. func RequireSendReceive( + ctx context.Context, t *testing.T, channel *p2p.Channel, peerID types.NodeID, send proto.Message, receive proto.Message, ) { - RequireSend(t, channel, p2p.Envelope{To: peerID, Message: send}) - RequireReceive(t, channel, p2p.Envelope{From: peerID, Message: send}) + RequireSend(ctx, t, channel, p2p.Envelope{To: peerID, Message: send}) + RequireReceive(ctx, t, channel, p2p.Envelope{From: peerID, Message: send}) } // RequireNoUpdates requires that a PeerUpdates subscription is empty. -func RequireNoUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates) { +func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUpdates) { t.Helper() select { case update := <-peerUpdates.Updates(): - require.Fail(t, "unexpected peer updates", "got %v", update) + if ctx.Err() == nil { + require.Fail(t, "unexpected peer updates", "got %v", update) + } + case <-ctx.Done(): default: } } // RequireError requires that the given peer error is submitted for a peer. 
-func RequireError(t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - select { - case channel.Error <- peerError: - case <-timer.C: +func RequireError(ctx context.Context, t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { + tctx, tcancel := context.WithTimeout(ctx, time.Second) + defer tcancel() + + err := channel.SendError(tctx, peerError) + switch { + case errors.Is(err, context.DeadlineExceeded): require.Fail(t, "timed out reporting error", "%v on %v", peerError, channel.ID) + default: + require.NoError(t, err, "unexpected error") } } @@ -124,9 +138,6 @@ func RequireUpdate(t *testing.T, peerUpdates *p2p.PeerUpdates, expect p2p.PeerUp case update := <-peerUpdates.Updates(): require.Equal(t, expect, update, "peer update did not match") - case <-peerUpdates.Done(): - require.Fail(t, "peer updates subscription is closed") - case <-timer.C: require.Fail(t, "timed out waiting for peer update", "expected %v", expect) } @@ -148,9 +159,6 @@ func RequireUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates, expect []p2p.Pee return } - case <-peerUpdates.Done(): - require.Fail(t, "peer updates subscription is closed") - case <-timer.C: require.Equal(t, expect, actual, "did not receive expected peer updates") return diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 7ccc0d59c6..2edc5b3b64 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -56,8 +56,6 @@ type PeerUpdate struct { type PeerUpdates struct { routerUpdatesCh chan PeerUpdate reactorUpdatesCh chan PeerUpdate - closeCh chan struct{} - closeOnce sync.Once } // NewPeerUpdates creates a new PeerUpdates subscription. 
It is primarily for @@ -67,7 +65,6 @@ func NewPeerUpdates(updatesCh chan PeerUpdate, buf int) *PeerUpdates { return &PeerUpdates{ reactorUpdatesCh: updatesCh, routerUpdatesCh: make(chan PeerUpdate, buf), - closeCh: make(chan struct{}), } } @@ -78,28 +75,13 @@ func (pu *PeerUpdates) Updates() <-chan PeerUpdate { // SendUpdate pushes information about a peer into the routing layer, // presumably from a peer. -func (pu *PeerUpdates) SendUpdate(update PeerUpdate) { +func (pu *PeerUpdates) SendUpdate(ctx context.Context, update PeerUpdate) { select { - case <-pu.closeCh: + case <-ctx.Done(): case pu.routerUpdatesCh <- update: } } -// Close closes the peer updates subscription. -func (pu *PeerUpdates) Close() { - pu.closeOnce.Do(func() { - // NOTE: We don't close updatesCh since multiple goroutines may be - // sending on it. The PeerManager senders will select on closeCh as well - // to avoid blocking on a closed subscription. - close(pu.closeCh) - }) -} - -// Done returns a channel that is closed when the subscription is closed. -func (pu *PeerUpdates) Done() <-chan struct{} { - return pu.closeCh -} - // PeerManagerOptions specifies options for a PeerManager. type PeerManagerOptions struct { // PersistentPeers are peers that we want to maintain persistent connections @@ -154,6 +136,10 @@ type PeerManagerOptions struct { // consider private and never gossip. PrivatePeers map[types.NodeID]struct{} + // SelfAddress is the address that will be advertised to peers for them to dial back to us. + // If Hostname and Port are unset, Advertise() will include no self-announcement + SelfAddress NodeAddress + // persistentPeers provides fast PersistentPeers lookups. It is built // by optimize(). 
persistentPeers map[types.NodeID]bool @@ -276,8 +262,6 @@ type PeerManager struct { rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes - closeCh chan struct{} // signal channel for Close() - closeOnce sync.Once mtx sync.Mutex store *peerStore @@ -312,7 +296,6 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), - closeCh: make(chan struct{}), store: store, dialing: map[types.NodeID]bool{}, @@ -513,7 +496,7 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { // for dialing again when appropriate (possibly after a retry timeout). // // FIXME: This should probably delete or mark bad addresses/peers after some time. -func (m *PeerManager) DialFailed(address NodeAddress) error { +func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error { m.mtx.Lock() defer m.mtx.Unlock() @@ -532,6 +515,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error { if !ok { return nil // Assume the address has been removed, ignore. } + addressInfo.LastDialFailure = time.Now().UTC() addressInfo.DialFailures++ if err := m.store.Set(peer); err != nil { @@ -551,7 +535,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error { select { case <-timer.C: m.dialWaker.Wake() - case <-m.closeCh: + case <-ctx.Done(): } }() } else { @@ -602,6 +586,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error { addressInfo.LastDialSuccess = now // If not found, assume address has been removed. 
} + if err := m.store.Set(peer); err != nil { return err } @@ -660,6 +645,11 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { peer = m.newPeerInfo(peerID) } + // reset this to avoid penalizing peers for their past transgressions + for _, addr := range peer.AddressInfo { + addr.DialFailures = 0 + } + // If all connections slots are full, but we allow upgrades (and we checked // above that we have upgrade capacity), then we can look for a lower-scored // peer to replace and if found accept the connection anyway and evict it. @@ -688,13 +678,13 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { // peer must already be marked as connected. This is separate from Dialed() and // Accepted() to allow the router to set up its internal queues before reactors // start sending messages. -func (m *PeerManager) Ready(peerID types.NodeID) { +func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() if m.connected[peerID] { m.ready[peerID] = true - m.broadcast(PeerUpdate{ + m.broadcast(ctx, PeerUpdate{ NodeID: peerID, Status: PeerStatusUp, }) @@ -755,7 +745,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { // Disconnected unmarks a peer as connected, allowing it to be dialed or // accepted again as appropriate. 
-func (m *PeerManager) Disconnected(peerID types.NodeID) { +func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() @@ -768,7 +758,7 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) { delete(m.ready, peerID) if ready { - m.broadcast(PeerUpdate{ + m.broadcast(ctx, PeerUpdate{ NodeID: peerID, Status: PeerStatusDown, }) @@ -805,6 +795,13 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress defer m.mtx.Unlock() addresses := make([]NodeAddress, 0, limit) + + // advertise ourselves, to let everyone know how to dial us back + // and enable mutual address discovery + if m.options.SelfAddress.Hostname != "" && m.options.SelfAddress.Port != 0 { + addresses = append(addresses, m.options.SelfAddress) + } + for _, peer := range m.store.Ranked() { if peer.ID == peerID { continue @@ -828,7 +825,7 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress // Subscribe subscribes to peer updates. The caller must consume the peer // updates in a timely fashion and close the subscription when done, otherwise // the PeerManager will halt. -func (m *PeerManager) Subscribe() *PeerUpdates { +func (m *PeerManager) Subscribe(ctx context.Context) *PeerUpdates { // FIXME: We use a size 1 buffer here. When we broadcast a peer update // we have to loop over all of the subscriptions, and we want to avoid // having to block and wait for a context switch before continuing on @@ -836,7 +833,7 @@ func (m *PeerManager) Subscribe() *PeerUpdates { // compounding. Limiting it to 1 means that the subscribers are still // reasonably in sync. However, this should probably be benchmarked. 
peerUpdates := NewPeerUpdates(make(chan PeerUpdate, 1), 1) - m.Register(peerUpdates) + m.Register(ctx, peerUpdates) return peerUpdates } @@ -848,39 +845,38 @@ func (m *PeerManager) Subscribe() *PeerUpdates { // The caller must consume the peer updates from this PeerUpdates // instance in a timely fashion and close the subscription when done, // otherwise the PeerManager will halt. -func (m *PeerManager) Register(peerUpdates *PeerUpdates) { +func (m *PeerManager) Register(ctx context.Context, peerUpdates *PeerUpdates) { m.mtx.Lock() + defer m.mtx.Unlock() m.subscriptions[peerUpdates] = peerUpdates - m.mtx.Unlock() go func() { for { select { - case <-peerUpdates.closeCh: - return - case <-m.closeCh: + case <-ctx.Done(): return case pu := <-peerUpdates.routerUpdatesCh: - m.processPeerEvent(pu) + m.processPeerEvent(ctx, pu) } } }() go func() { - select { - case <-peerUpdates.Done(): - m.mtx.Lock() - delete(m.subscriptions, peerUpdates) - m.mtx.Unlock() - case <-m.closeCh: - } + <-ctx.Done() + m.mtx.Lock() + defer m.mtx.Unlock() + delete(m.subscriptions, peerUpdates) }() } -func (m *PeerManager) processPeerEvent(pu PeerUpdate) { +func (m *PeerManager) processPeerEvent(ctx context.Context, pu PeerUpdate) { m.mtx.Lock() defer m.mtx.Unlock() + if ctx.Err() != nil { + return + } + if _, ok := m.store.peers[pu.NodeID]; !ok { m.store.peers[pu.NodeID] = &peerInfo{} } @@ -900,29 +896,19 @@ func (m *PeerManager) processPeerEvent(pu PeerUpdate) { // // FIXME: Consider using an internal channel to buffer updates while also // maintaining order if this is a problem. -func (m *PeerManager) broadcast(peerUpdate PeerUpdate) { +func (m *PeerManager) broadcast(ctx context.Context, peerUpdate PeerUpdate) { for _, sub := range m.subscriptions { - // We have to check closeCh separately first, otherwise there's a 50% - // chance the second select will send on a closed subscription. 
- select { - case <-sub.closeCh: - continue - default: + if ctx.Err() != nil { + return } select { + case <-ctx.Done(): + return case sub.reactorUpdatesCh <- peerUpdate: - case <-sub.closeCh: } } } -// Close closes the peer manager, releasing resources (i.e. goroutines). -func (m *PeerManager) Close() { - m.closeOnce.Do(func() { - close(m.closeCh) - }) -} - // Addresses returns all known addresses for a peer, primarily for testing. // The order is arbitrary. func (m *PeerManager) Addresses(peerID types.NodeID) []NodeAddress { @@ -1287,15 +1273,23 @@ func (p *peerInfo) Score() PeerScore { return PeerScorePersistent } - if p.MutableScore <= 0 { + score := p.MutableScore + + for _, addr := range p.AddressInfo { + // DialFailures is reset when dials succeed, so this + // is either the number of dial failures or 0. + score -= int64(addr.DialFailures) + } + + if score <= 0 { return 0 } - if p.MutableScore >= math.MaxUint8 { + if score >= math.MaxUint8 { return PeerScore(math.MaxUint8) } - return PeerScore(p.MutableScore) + return PeerScore(score) } // Validate validates the peer info. 
diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index edb5fc6fc0..4c7bef0cc0 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -1,6 +1,7 @@ package p2p import ( + "context" "strings" "testing" "time" @@ -21,7 +22,6 @@ func TestPeerScoring(t *testing.T) { db := dbm.NewMemDB() peerManager, err := NewPeerManager(selfID, db, PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() // create a fake node id := types.NodeID(strings.Repeat("a1", 20)) @@ -29,13 +29,16 @@ func TestPeerScoring(t *testing.T) { require.NoError(t, err) require.True(t, added) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("Synchronous", func(t *testing.T) { // update the manager and make sure it's correct require.EqualValues(t, 0, peerManager.Scores()[id]) // add a bunch of good status updates and watch things increase. for i := 1; i < 10; i++ { - peerManager.processPeerEvent(PeerUpdate{ + peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, }) @@ -44,7 +47,7 @@ func TestPeerScoring(t *testing.T) { // watch the corresponding decreases respond to update for i := 10; i == 0; i-- { - peerManager.processPeerEvent(PeerUpdate{ + peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusBad, }) @@ -53,9 +56,8 @@ func TestPeerScoring(t *testing.T) { }) t.Run("AsynchronousIncrement", func(t *testing.T) { start := peerManager.Scores()[id] - pu := peerManager.Subscribe() - defer pu.Close() - pu.SendUpdate(PeerUpdate{ + pu := peerManager.Subscribe(ctx) + pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, }) @@ -67,9 +69,8 @@ func TestPeerScoring(t *testing.T) { }) t.Run("AsynchronousDecrement", func(t *testing.T) { start := peerManager.Scores()[id] - pu := peerManager.Subscribe() - defer pu.Close() - pu.SendUpdate(PeerUpdate{ + pu := peerManager.Subscribe(ctx) + pu.SendUpdate(ctx, PeerUpdate{ 
NodeID: id, Status: PeerStatusBad, }) diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 69c798d2dd..17d04bac2f 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -154,7 +154,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{bID: 1}, }) require.NoError(t, err) - defer peerManager.Close() for _, addr := range append(append(aAddresses, bAddresses...), cAddresses...) { added, err := peerManager.Add(addr) @@ -171,8 +170,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { cID: 0, }, peerManager.Scores()) - peerManager.Close() - // Creating a new peer manager with the same database should retain the // peers, but they should have updated scores from the new PersistentPeers // configuration. @@ -181,7 +178,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{cID: 1}, }) require.NoError(t, err) - defer peerManager.Close() require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID)) require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID)) @@ -208,7 +204,6 @@ func TestNewPeerManager_SelfIDChange(t *testing.T) { require.NoError(t, err) require.True(t, added) require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - peerManager.Close() // If we change our selfID to one of the peers in the peer store, it // should be removed from the store. 
@@ -273,6 +268,9 @@ func TestPeerManager_Add(t *testing.T) { } func TestPeerManager_DialNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -296,6 +294,9 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} options := p2p.PeerManagerOptions{ @@ -311,7 +312,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { // Do five dial retries (six dials total). The retry time should double for // each failure. At the forth retry, MaxRetryTime should kick in. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + ctx, cancel = context.WithTimeout(ctx, 5*time.Second) defer cancel() for i := 0; i <= 5; i++ { @@ -337,11 +338,14 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { require.Fail(t, "unexpected retry") } - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) } } func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -356,7 +360,7 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { }() // This will block until peer is added above. 
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err := peerManager.DialNext(ctx) require.NoError(t, err) @@ -364,6 +368,9 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, }) @@ -389,20 +396,26 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { require.Zero(t, dial) // Spawn a goroutine to fail a's dial attempt. + sig := make(chan struct{}) go func() { + defer close(sig) time.Sleep(200 * time.Millisecond) - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) }() // This should make b available for dialing (not a, retries are disabled). - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) - defer cancel() - dial, err = peerManager.DialNext(ctx) + opctx, opcancel := context.WithTimeout(ctx, 3*time.Second) + defer opcancel() + dial, err = peerManager.DialNext(opctx) require.NoError(t, err) require.Equal(t, b, dial) + <-sig } func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + options := p2p.PeerManagerOptions{MinRetryTime: 200 * time.Millisecond} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options) require.NoError(t, err) @@ -416,12 +429,12 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { dial, err := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) - require.NoError(t, peerManager.DialFailed(dial)) + require.NoError(t, peerManager.DialFailed(ctx, dial)) failed := time.Now() // The retry timer should unblock DialNext and make a available again after // the retry time passes. 
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -430,6 +443,9 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -445,12 +461,14 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { require.NoError(t, err) require.Zero(t, dial) + dctx, dcancel := context.WithTimeout(ctx, 300*time.Millisecond) + defer dcancel() go func() { time.Sleep(200 * time.Millisecond) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(dctx, a.NodeID) }() - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -494,6 +512,9 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { } func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -559,7 +580,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // Now, if we disconnect a, we should be allowed to dial d because we have a // free upgrade slot. 
- peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, d, dial) @@ -568,7 +589,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // However, if we disconnect b (such that only c and d are connected), we // should not be allowed to dial e even though there are upgrade slots, // because there are no lower-scored nodes that can be upgraded. - peerManager.Disconnected(b.NodeID) + peerManager.Disconnected(ctx, b.NodeID) added, err = peerManager.Add(e) require.NoError(t, err) require.True(t, added) @@ -662,6 +683,9 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { } func TestPeerManager_TryDialNext_Multiple(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + aID := types.NodeID(strings.Repeat("a", 40)) bID := types.NodeID(strings.Repeat("b", 40)) addresses := []p2p.NodeAddress{ @@ -686,7 +710,7 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) { address, err := peerManager.TryDialNext() require.NoError(t, err) require.NotZero(t, address) - require.NoError(t, peerManager.DialFailed(address)) + require.NoError(t, peerManager.DialFailed(ctx, address)) dial = append(dial, address) } require.ElementsMatch(t, dial, addresses) @@ -711,13 +735,16 @@ func TestPeerManager_DialFailed(t *testing.T) { require.NoError(t, err) require.True(t, added) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Dialing and then calling DialFailed with a different address (same // NodeID) should unmark as dialing and allow us to dial the other address // again, but not register the failed address. 
dial, err := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) - require.NoError(t, peerManager.DialFailed(p2p.NodeAddress{ + require.NoError(t, peerManager.DialFailed(ctx, p2p.NodeAddress{ Protocol: "tcp", NodeID: aID, Hostname: "localhost"})) require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID)) @@ -726,15 +753,18 @@ func TestPeerManager_DialFailed(t *testing.T) { require.Equal(t, a, dial) // Calling DialFailed on same address twice should be fine. - require.NoError(t, peerManager.DialFailed(a)) - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) // DialFailed on an unknown peer shouldn't error or add it. - require.NoError(t, peerManager.DialFailed(b)) + require.NoError(t, peerManager.DialFailed(ctx, b)) require.Equal(t, []types.NodeID{aID}, peerManager.Peers()) } func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -774,7 +804,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { require.Empty(t, dial) // Failing b's dial will now make c available for dialing. 
- require.NoError(t, peerManager.DialFailed(b)) + require.NoError(t, peerManager.DialFailed(ctx, b)) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, c, dial) @@ -941,6 +971,9 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { } func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -980,7 +1013,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { // In the meanwhile, a disconnects and d connects. d is even lower-scored // than b (1 vs 2), which is currently being upgraded. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) added, err = peerManager.Add(d) require.NoError(t, err) require.True(t, added) @@ -995,6 +1028,9 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { } func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1030,7 +1066,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { require.Equal(t, c, dial) // In the meanwhile, b disconnects. - peerManager.Disconnected(b.NodeID) + peerManager.Disconnected(ctx, b.NodeID) // Once c completes the upgrade of b, there is no longer a need to // evict anything since we're at capacity. 
@@ -1163,6 +1199,9 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { } func TestPeerManager_Accepted_Upgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1199,7 +1238,7 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) { evict, err := peerManager.TryEvictNext() require.NoError(t, err) require.Equal(t, a.NodeID, evict) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) // c still cannot get accepted, since it's not scored above b. require.Error(t, peerManager.Accepted(c.NodeID)) @@ -1256,11 +1295,13 @@ func TestPeerManager_Ready(t *testing.T) { a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) // Connecting to a should still have it as status down. added, err := peerManager.Add(a) @@ -1270,7 +1311,7 @@ func TestPeerManager_Ready(t *testing.T) { require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID)) // Marking a as ready should transition it to PeerStatusUp and send an update. 
- peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID)) require.Equal(t, p2p.PeerUpdate{ NodeID: a.NodeID, @@ -1282,13 +1323,16 @@ func TestPeerManager_Ready(t *testing.T) { require.NoError(t, err) require.True(t, added) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID)) - peerManager.Ready(b.NodeID) + peerManager.Ready(ctx, b.NodeID) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID)) require.Empty(t, sub.Updates()) } // See TryEvictNext for most tests, this just tests blocking behavior. func TestPeerManager_EvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1298,7 +1342,7 @@ func TestPeerManager_EvictNext(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Since there are no peers to evict, EvictNext should block until timeout. timeoutCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) @@ -1322,6 +1366,9 @@ func TestPeerManager_EvictNext(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1331,7 +1378,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Spawn a goroutine to error a peer after a delay. 
go func() { @@ -1340,7 +1387,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { }() // This will block until peer errors above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1348,6 +1395,9 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1363,7 +1413,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Spawn a goroutine to upgrade to b with a delay. go func() { @@ -1378,7 +1428,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { }() // This will block until peer is upgraded above. 
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1386,6 +1436,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1401,7 +1454,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // Spawn a goroutine to upgrade b with a delay. go func() { @@ -1410,13 +1463,16 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { }() // This will block until peer is upgraded above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) require.Equal(t, a.NodeID, evict) } func TestPeerManager_TryEvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1433,7 +1489,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) { // Connecting to a won't evict anything either. require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) // But if a errors it should be evicted. 
peerManager.Errored(a.NodeID, errors.New("foo")) @@ -1458,11 +1514,13 @@ func TestPeerManager_Disconnected(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := peerManager.Subscribe() - defer sub.Close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sub := peerManager.Subscribe(ctx) // Disconnecting an unknown peer does nothing. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, peerManager.Peers()) require.Empty(t, sub.Updates()) @@ -1471,14 +1529,14 @@ func TestPeerManager_Disconnected(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, sub.Updates()) // Disconnecting a ready peer sends a status update. _, err = peerManager.Add(a) require.NoError(t, err) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID)) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{ @@ -1486,7 +1544,7 @@ func TestPeerManager_Disconnected(t *testing.T) { Status: p2p.PeerStatusUp, }, <-sub.Updates()) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID)) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{ @@ -1500,13 +1558,16 @@ func TestPeerManager_Disconnected(t *testing.T) { require.NoError(t, err) require.Equal(t, a, dial) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Zero(t, dial) } func TestPeerManager_Errored(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := 
p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1530,7 +1591,7 @@ func TestPeerManager_Errored(t *testing.T) { require.Zero(t, evict) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) evict, err = peerManager.TryEvictNext() require.NoError(t, err) require.Zero(t, evict) @@ -1543,14 +1604,16 @@ func TestPeerManager_Errored(t *testing.T) { } func TestPeerManager_Subscribe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) // This tests all subscription events for full peer lifecycles. - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) added, err := peerManager.Add(a) require.NoError(t, err) @@ -1561,11 +1624,11 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, peerManager.Accepted(a.NodeID)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) @@ -1578,7 +1641,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, peerManager.Dialed(a)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) @@ -1589,7 +1652,7 @@ func 
TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, err) require.Equal(t, a.NodeID, evict) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) @@ -1599,18 +1662,20 @@ func TestPeerManager_Subscribe(t *testing.T) { require.Equal(t, a, dial) require.Empty(t, sub.Updates()) - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) require.Empty(t, sub.Updates()) } func TestPeerManager_Subscribe_Close(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) added, err := peerManager.Add(a) require.NoError(t, err) @@ -1618,17 +1683,20 @@ func TestPeerManager_Subscribe_Close(t *testing.T) { require.NoError(t, peerManager.Accepted(a.NodeID)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) // Closing the subscription should not send us the disconnected update. 
- sub.Close() - peerManager.Disconnected(a.NodeID) + cancel() + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, sub.Updates()) } func TestPeerManager_Subscribe_Broadcast(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} @@ -1636,19 +1704,19 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - s1 := peerManager.Subscribe() - defer s1.Close() - s2 := peerManager.Subscribe() - defer s2.Close() - s3 := peerManager.Subscribe() - defer s3.Close() + s2ctx, s2cancel := context.WithCancel(ctx) + defer s2cancel() + + s1 := peerManager.Subscribe(ctx) + s2 := peerManager.Subscribe(s2ctx) + s3 := peerManager.Subscribe(ctx) // Connecting to a peer should send updates on all subscriptions. added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID) + peerManager.Ready(ctx, a.NodeID) expectUp := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp} require.NotEmpty(t, s1) @@ -1660,8 +1728,9 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) { // We now close s2. Disconnecting the peer should only send updates // on s1 and s3. - s2.Close() - peerManager.Disconnected(a.NodeID) + s2cancel() + time.Sleep(250 * time.Millisecond) // give the thread a chance to exit + peerManager.Disconnected(ctx, a.NodeID) expectDown := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown} require.NotEmpty(t, s1) @@ -1675,6 +1744,9 @@ func TestPeerManager_Close(t *testing.T) { // leaktest will check that spawned goroutines are closed. 
t.Cleanup(leaktest.CheckTimeout(t, 1*time.Second)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ @@ -1684,7 +1756,7 @@ func TestPeerManager_Close(t *testing.T) { // This subscription isn't closed, but PeerManager.Close() // should reap the spawned goroutine. - _ = peerManager.Subscribe() + _ = peerManager.Subscribe(ctx) // This dial failure will start a retry timer for 10 seconds, which // should be reaped. @@ -1694,10 +1766,7 @@ func TestPeerManager_Close(t *testing.T) { dial, err := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) - require.NoError(t, peerManager.DialFailed(a)) - - // This should clean up the goroutines. - peerManager.Close() + require.NoError(t, peerManager.DialFailed(ctx, a)) } func TestPeerManager_Advertise(t *testing.T) { @@ -1720,7 +1789,6 @@ func TestPeerManager_Advertise(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1}, }) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(aTCP) require.NoError(t, err) @@ -1760,6 +1828,23 @@ func TestPeerManager_Advertise(t *testing.T) { }, peerManager.Advertise(dID, 2)) } +func TestPeerManager_Advertise_Self(t *testing.T) { + dID := types.NodeID(strings.Repeat("d", 40)) + + self := p2p.NodeAddress{Protocol: "tcp", NodeID: selfID, Hostname: "2001:db8::1", Port: 26657} + + // Create a peer manager with SelfAddress defined. + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ + SelfAddress: self, + }) + require.NoError(t, err) + + // peer manager should always advertise its SelfAddress. 
+ require.ElementsMatch(t, []p2p.NodeAddress{ + self, + }, peerManager.Advertise(dID, 100)) +} + func TestPeerManager_SetHeight_GetHeight(t *testing.T) { a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1787,7 +1872,6 @@ func TestPeerManager_SetHeight_GetHeight(t *testing.T) { require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) // The heights should not be persisted. - peerManager.Close() peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) require.NoError(t, err) diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 645cc19e13..0c256a4f39 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -21,8 +21,6 @@ var ( _ p2p.Wrapper = (*protop2p.PexMessage)(nil) ) -// TODO: Consolidate with params file. -// See https://github.com/tendermint/tendermint/issues/6371 const ( // PexChannel is a channel for PEX messages PexChannel = 0x00 @@ -46,9 +44,6 @@ const ( // the maximum amount of addresses that can be included in a response maxAddresses uint16 = 100 - // allocated time to resolve a node address into a set of endpoints - resolveTimeout = 3 * time.Second - // How long to wait when there are no peers available before trying again noAvailablePeersWaitPeriod = 1 * time.Second @@ -83,11 +78,11 @@ func ChannelDescriptor() *conn.ChannelDescriptor { // adding it to the back of the list once a response is received. type Reactor struct { service.BaseService + logger log.Logger peerManager *p2p.PeerManager pexCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} // list of available peers to loop through and send peer requests to availablePeers map[types.NodeID]struct{} @@ -105,9 +100,6 @@ type Reactor struct { // minReceiveRequestInterval). 
lastReceivedRequests map[types.NodeID]time.Time - // the time when another request will be sent - nextRequestTime time.Time - // keep track of how many new peers to existing peers we have received to // extrapolate the size of the network newPeers uint32 @@ -121,75 +113,102 @@ type Reactor struct { // NewReactor returns a reference to a new reactor. func NewReactor( + ctx context.Context, logger log.Logger, peerManager *p2p.PeerManager, - pexCh *p2p.Channel, + channelCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, -) *Reactor { +) (*Reactor, error) { + + channel, err := channelCreator(ctx, ChannelDescriptor()) + if err != nil { + return nil, err + } r := &Reactor{ + logger: logger, peerManager: peerManager, - pexCh: pexCh, + pexCh: channel, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), availablePeers: make(map[types.NodeID]struct{}), requestsSent: make(map[types.NodeID]struct{}), lastReceivedRequests: make(map[types.NodeID]time.Time), } r.BaseService = *service.NewBaseService(logger, "PEX", r) - return r + return r, nil } // OnStart starts separate go routines for each p2p Channel and listens for // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { - go r.processPexCh() - go r.processPeerUpdates() +func (r *Reactor) OnStart(ctx context.Context) error { + go r.processPexCh(ctx) + go r.processPeerUpdates(ctx) return nil } // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *Reactor) OnStop() { - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. 
This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.pexCh.Done() - <-r.peerUpdates.Done() -} +func (r *Reactor) OnStop() {} // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. -func (r *Reactor) processPexCh() { - defer r.pexCh.Close() +func (r *Reactor) processPexCh(ctx context.Context) { + timer := time.NewTimer(0) + defer timer.Stop() + + r.mtx.Lock() + var ( + duration = r.calculateNextRequestTime() + err error + ) + r.mtx.Unlock() + + incoming := make(chan *p2p.Envelope) + go func() { + defer close(incoming) + iter := r.pexCh.Receive(ctx) + for iter.Next(ctx) { + select { + case <-ctx.Done(): + return + case incoming <- iter.Envelope(): + } + } + }() for { + timer.Reset(duration) + select { - case <-r.closeCh: - r.Logger.Debug("stopped listening on PEX channel; closing...") + case <-ctx.Done(): return // outbound requests for new peers - case <-r.waitUntilNextRequest(): - r.sendRequestForPeers() - + case <-timer.C: + duration, err = r.sendRequestForPeers(ctx) + if err != nil { + return + } // inbound requests for new peers or responses to requests sent by this // reactor - case envelope := <-r.pexCh.In: - if err := r.handleMessage(r.pexCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) - r.pexCh.Error <- p2p.PeerError{ + case envelope, ok := <-incoming: + if !ok { + return + } + duration, err = r.handleMessage(ctx, r.pexCh.ID, envelope) + if err != nil { + r.logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) + if serr := r.pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } } + } } } @@ -197,104 +216,55 @@ func (r *Reactor) processPexCh() { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. 
When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { + case <-ctx.Done(): + return case peerUpdate := <-r.peerUpdates.Updates(): r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") - return } } } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope) (time.Duration, error) { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *protop2p.PexRequest: - // Check if the peer hasn't sent a prior request too close to this one - // in time. - if err := r.markPeerRequest(envelope.From); err != nil { - return err - } - - // parse and send the legacy PEX addresses - pexAddresses := r.resolve(r.peerManager.Advertise(envelope.From, maxAddresses)) - r.pexCh.Out <- p2p.Envelope{ - To: envelope.From, - Message: &protop2p.PexResponse{Addresses: pexAddresses}, - } - - case *protop2p.PexResponse: - // check if the response matches a request that was made to that peer - if err := r.markPeerResponse(envelope.From); err != nil { - return err - } - - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) - } - - for _, pexAddress := range msg.Addresses { - // no protocol is prefixed so we assume the default (mconn) - peerAddress, err := p2p.ParseNodeAddress( - fmt.Sprintf("%s@%s:%d", pexAddress.ID, pexAddress.IP, pexAddress.Port)) - if err != nil { - continue - } - added, err := r.peerManager.Add(peerAddress) - if err != nil { - 
logger.Error("failed to add PEX address", "address", peerAddress, "err", err) - } - if added { - r.newPeers++ - logger.Debug("added PEX address", "address", peerAddress) - } - r.totalPeers++ - } - - // V2 PEX MESSAGES - case *protop2p.PexRequestV2: // check if the peer hasn't sent a prior request too close to this one // in time if err := r.markPeerRequest(envelope.From); err != nil { - return err + return time.Minute, err } // request peers from the peer manager and parse the NodeAddresses into // URL strings nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) - pexAddressesV2 := make([]protop2p.PexAddressV2, len(nodeAddresses)) + pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses)) for idx, addr := range nodeAddresses { - pexAddressesV2[idx] = protop2p.PexAddressV2{ + pexAddresses[idx] = protop2p.PexAddress{ URL: addr.String(), } } - r.pexCh.Out <- p2p.Envelope{ + if err := r.pexCh.Send(ctx, p2p.Envelope{ To: envelope.From, - Message: &protop2p.PexResponseV2{Addresses: pexAddressesV2}, + Message: &protop2p.PexResponse{Addresses: pexAddresses}, + }); err != nil { + return 0, err } - case *protop2p.PexResponseV2: + return time.Second, nil + case *protop2p.PexResponse: // check if the response matches a request that was made to that peer if err := r.markPeerResponse(envelope.From); err != nil { - return err + return time.Minute, err } // check the size of the response if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", + return 10 * time.Minute, fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", maxAddresses, len(msg.Addresses), ) @@ -307,79 +277,29 @@ func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { } added, err := r.peerManager.Add(peerAddress) if err != nil { - logger.Error("failed to add V2 PEX address", "address", peerAddress, "err", err) + logger.Error("failed to add PEX address", "address", peerAddress, "err", err) } if added { r.newPeers++ 
- logger.Debug("added V2 PEX address", "address", peerAddress) + logger.Debug("added PEX address", "address", peerAddress) } r.totalPeers++ } + return 10 * time.Minute, nil default: - return fmt.Errorf("received unknown message: %T", msg) + return time.Second, fmt.Errorf("received unknown message: %T", msg) } - - return nil -} - -// resolve resolves a set of peer addresses into PEX addresses. -// -// FIXME: This is necessary because the current PEX protocol only supports -// IP/port pairs, while the P2P stack uses NodeAddress URLs. The PEX protocol -// should really use URLs too, to exchange DNS names instead of IPs and allow -// different transport protocols (e.g. QUIC and MemoryTransport). -// -// FIXME: We may want to cache and parallelize this, but for now we'll just rely -// on the operating system to cache it for us. -func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { - limit := len(addresses) - pexAddresses := make([]protop2p.PexAddress, 0, limit) - - for _, address := range addresses { - ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout) - endpoints, err := address.Resolve(ctx) - r.Logger.Debug("resolved node address", "endpoints", endpoints) - cancel() - - if err != nil { - r.Logger.Debug("failed to resolve address", "address", address, "err", err) - continue - } - - for _, endpoint := range endpoints { - r.Logger.Debug("checking endpint", "IP", endpoint.IP, "Port", endpoint.Port) - if len(pexAddresses) >= limit { - return pexAddresses - - } else if endpoint.IP != nil { - r.Logger.Debug("appending pex address") - // PEX currently only supports IP-networked transports (as - // opposed to e.g. p2p.MemoryTransport). - // - // FIXME: as the PEX address contains no information about the - // protocol, we jam this into the ID. 
We won't need to this once - // we support URLs - pexAddresses = append(pexAddresses, protop2p.PexAddress{ - ID: string(address.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - } - - return pexAddresses } // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (duration time.Duration, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -387,23 +307,22 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - r.Logger.Debug("received PEX message", "peer", envelope.From) + r.logger.Debug("received PEX message", "peer", envelope.From) switch chID { case p2p.ChannelID(PexChannel): - err = r.handlePexMessage(envelope) - + duration, err = r.handlePexMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) } - return err + return } // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we // send a request for addresses. 
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) + r.logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -419,23 +338,17 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { } } -func (r *Reactor) waitUntilNextRequest() <-chan time.Time { - return time.After(time.Until(r.nextRequestTime)) -} - // sendRequestForPeers pops the first peerID off the list and sends the // peer a request for more peer addresses. The function then moves the // peer into the requestsSent bucket and calculates when the next request // time should be -func (r *Reactor) sendRequestForPeers() { +func (r *Reactor) sendRequestForPeers(ctx context.Context) (time.Duration, error) { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { // no peers are available - r.Logger.Debug("no available peers to send request to, waiting...") - r.nextRequestTime = time.Now().Add(noAvailablePeersWaitPeriod) - - return + r.logger.Debug("no available peers to send request to, waiting...") + return noAvailablePeersWaitPeriod, nil } var peerID types.NodeID @@ -444,25 +357,21 @@ func (r *Reactor) sendRequestForPeers() { break } - // The node accommodates for both pex systems - if r.isLegacyPeer(peerID) { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequest{}, - } - } else { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequestV2{}, - } + // send out the pex request + if err := r.pexCh.Send(ctx, p2p.Envelope{ + To: peerID, + Message: &protop2p.PexRequest{}, + }); err != nil { + return 0, err } // remove the peer from the abvailable peers list and mark it in the requestsSent map delete(r.availablePeers, peerID) r.requestsSent[peerID] = struct{}{} - r.calculateNextRequestTime() - r.Logger.Debug("peer request sent", "next_request_time", 
r.nextRequestTime) + dur := r.calculateNextRequestTime() + r.logger.Debug("peer request sent", "next_request_time", dur) + return dur, nil } // calculateNextRequestTime implements something of a proportional controller @@ -474,15 +383,15 @@ func (r *Reactor) sendRequestForPeers() { // as possible. As the node becomes more familiar with the network the ratio of // new nodes will plummet to a very small number, meaning the interval expands // to its upper bound. -// CONTRACT: Must use a write lock as nextRequestTime is updated -func (r *Reactor) calculateNextRequestTime() { +// +// CONTRACT: The caller must hold r.mtx exclusively when calling this method. +func (r *Reactor) calculateNextRequestTime() time.Duration { // check if the peer store is full. If so then there is no need // to send peer requests too often if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { - r.Logger.Debug("peer manager near full ratio, sleeping...", + r.logger.Debug("peer manager near full ratio, sleeping...", "sleep_period", fullCapacityInterval, "ratio", ratio) - r.nextRequestTime = time.Now().Add(fullCapacityInterval) - return + return fullCapacityInterval } // baseTime represents the shortest interval that we can send peer requests @@ -508,7 +417,7 @@ func (r *Reactor) calculateNextRequestTime() { } // NOTE: As ratio is always >= 1, discovery ratio is >= 1. 
Therefore we don't need to worry // about the next request time being less than the minimum time - r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) + return baseTime * time.Duration(r.discoveryRatio) } func (r *Reactor) markPeerRequest(peer types.NodeID) error { @@ -538,14 +447,3 @@ func (r *Reactor) markPeerResponse(peer types.NodeID) error { r.availablePeers[peer] = struct{}{} return nil } - -// all addresses must use a MCONN protocol for the peer to be considered part of the -// legacy p2p pex system -func (r *Reactor) isLegacyPeer(peer types.NodeID) bool { - for _, addr := range r.peerManager.Addresses(peer) { - if addr.Protocol != p2p.MConnProtocol { - return false - } - } - return true -} diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index b7e1a01c3d..b4197095f7 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -1,7 +1,11 @@ +// Temporarily disabled pending ttps://github.com/tendermint/tendermint/issues/7626. 
+//go:build issue7626 + package pex_test import ( "context" + "errors" "strings" "testing" "time" @@ -27,36 +31,40 @@ const ( firstNode = 0 secondNode = 1 thirdNode = 2 - fourthNode = 3 ) func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // start a network with one mock reactor and one "real" reactor - testNet := setupNetwork(t, testOptions{ + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) // assert that the mock node receives a request from the real node - testNet.listenForRequest(t, secondNode, firstNode, shortWait) + testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait) // assert that when a mock node sends a request it receives a response (and // the correct one) - testNet.sendRequest(t, firstNode, secondNode, true) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil)) + testNet.sendRequest(ctx, t, firstNode, secondNode) + testNet.listenForResponse(ctx, t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 4, }) // make every node be only connected with one other node (it actually ends up // being two because of two way connections but oh well) - testNet.connectN(t, 1) - testNet.start(t) + testNet.connectN(ctx, t, 1) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -65,23 +73,26 @@ func TestReactorConnectFullNetwork(t *testing.T) { } func TestReactorSendsRequestsTooOften(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := 
setupSingle(ctx, t) badNode := newNodeID(t, "b") r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } resp := <-r.pexOutCh - msg, ok := resp.Message.(*p2pproto.PexResponseV2) + msg, ok := resp.Message.(*p2pproto.PexResponse) require.True(t, ok) require.Empty(t, msg.Addresses) r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } peerErr := <-r.pexErrCh @@ -92,32 +103,38 @@ func TestReactorSendsRequestsTooOften(t *testing.T) { } func TestReactorSendsResponseWithoutRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 3, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send // two responses to counter that. 
- testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) + testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode}) // secondNode should evict the firstNode - testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) + testNet.listenForPeerUpdate(ctx, t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) } func TestReactorNeverSendsTooManyPeers(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) - testNet.addNodes(t, 110) + testNet.addNodes(ctx, t, 110) nodes := make([]int, 110) for i := 0; i < len(nodes); i++ { nodes[i] = i + 2 @@ -126,20 +143,23 @@ func TestReactorNeverSendsTooManyPeers(t *testing.T) { // first we check that even although we have 110 peers, honest pex reactors // only send 100 (test if secondNode sends firstNode 100 addresses) - testNet.pingAndlistenForNAddresses(t, secondNode, firstNode, shortWait, 100) + testNet.pingAndlistenForNAddresses(ctx, t, secondNode, firstNode, shortWait, 100) } func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} added, err := r.manager.Add(peer) require.NoError(t, err) require.True(t, added) - addresses := make([]p2pproto.PexAddressV2, 101) + addresses := make([]p2pproto.PexAddress, 101) for i := 0; i < len(addresses); i++ { nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} - addresses[i] = p2pproto.PexAddressV2{ + 
addresses[i] = p2pproto.PexAddress{ URL: nodeAddress.String(), } } @@ -152,12 +172,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { select { // wait for a request and then send a response with too many addresses case req := <-r.pexOutCh: - if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok { + if _, ok := req.Message.(*p2pproto.PexRequest); !ok { t.Fatal("expected v2 pex request") } r.pexInCh <- p2p.Envelope{ From: peer.NodeID, - Message: &p2pproto.PexResponseV2{ + Message: &p2pproto.PexResponse{ Addresses: addresses, }, } @@ -174,14 +194,17 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { } func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 8, MaxPeers: 4, MaxConnected: 3, BufferSize: 8, }) - testNet.connectN(t, 1) - testNet.start(t) + testNet.connectN(ctx, t, 1) + testNet.start(ctx, t) // test that all nodes reach full capacity for _, nodeID := range testNet.nodes { @@ -193,14 +216,17 @@ func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { } func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 3, MaxPeers: 25, MaxConnected: 25, BufferSize: 5, }) - testNet.connectN(t, 1) - testNet.start(t) + testNet.connectN(ctx, t, 1) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -209,12 +235,15 @@ func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { } func TestReactorWithNetworkGrowth(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ 
TotalNodes: 5, BufferSize: 5, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -222,14 +251,14 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } // now we inject 10 more nodes - testNet.addNodes(t, 10) + testNet.addNodes(ctx, t, 10) for i := 5; i < testNet.total; i++ { node := testNet.nodes[i] - require.NoError(t, testNet.reactors[node].Start()) + require.NoError(t, testNet.reactors[node].Start(ctx)) require.True(t, testNet.reactors[node].IsRunning()) // we connect all new nodes to a single entry point and check that the // node can distribute the addresses to all the others - testNet.connectPeers(t, 0, i) + testNet.connectPeers(ctx, t, 0, i) } require.Len(t, testNet.reactors, 15) @@ -239,38 +268,6 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } } -func TestReactorIntegrationWithLegacyHandleRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 3, - }) - testNet.connectAll(t) - testNet.start(t) - t.Log(testNet.nodes) - - // mock node sends a V1 Pex message to the second node - testNet.sendRequest(t, firstNode, secondNode, false) - addrs := testNet.getAddressesFor(t, []int{thirdNode}) - testNet.listenForLegacyResponse(t, secondNode, firstNode, shortWait, addrs) -} - -func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 4, - BufferSize: 4, - }) - testNet.connectPeers(t, firstNode, secondNode) - testNet.connectPeers(t, firstNode, thirdNode) - testNet.connectPeers(t, firstNode, fourthNode) - testNet.start(t) - - testNet.listenForRequest(t, secondNode, firstNode, shortWait) - // send a v1 response instead - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode, fourthNode}, false) - testNet.requireNumberOfPeers(t, secondNode, len(testNet.nodes)-1, shortWait) -} - type 
singleTestReactor struct { reactor *pex.Reactor pexInCh chan p2p.Envelope @@ -281,7 +278,7 @@ type singleTestReactor struct { manager *p2p.PeerManager } -func setupSingle(t *testing.T) *singleTestReactor { +func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { t.Helper() nodeID := newNodeID(t, "a") chBuf := 2 @@ -301,16 +298,15 @@ func setupSingle(t *testing.T) *singleTestReactor { peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates) - require.NoError(t, reactor.Start()) - t.Cleanup(func() { - err := reactor.Stop() - if err != nil { - t.Fatal(err) - } - pexCh.Close() - peerUpdates.Close() - }) + chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return pexCh, nil + } + + reactor, err := pex.NewReactor(ctx, log.TestingLogger(), peerManager, chCreator, peerUpdates) + require.NoError(t, err) + + require.NoError(t, reactor.Start(ctx)) + t.Cleanup(reactor.Wait) return &singleTestReactor{ reactor: reactor, @@ -349,7 +345,7 @@ type testOptions struct { // setup setups a test suite with a network of nodes. 
Mocknodes represent the // hollow nodes that the test can listen and send on -func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { +func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite { t.Helper() require.Greater(t, opts.TotalNodes, opts.MockNodes) @@ -369,7 +365,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, networkOpts), + network: p2ptest.MakeNetwork(ctx, t, networkOpts), reactors: make(map[types.NodeID]*pex.Reactor, realNodes), pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), @@ -380,24 +376,31 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { // NOTE: we don't assert that the channels get drained after stopping the // reactor - rts.pexChannels = rts.network.MakeChannelsNoCleanup(t, pex.ChannelDescriptor()) + rts.pexChannels = rts.network.MakeChannelsNoCleanup(ctx, t, pex.ChannelDescriptor()) idx := 0 for nodeID := range rts.network.Nodes { rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) + + chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.pexChannels[nodeID], nil + } // the first nodes in the array are always mock nodes if idx < opts.MockNodes { rts.mocks = append(rts.mocks, nodeID) } else { - rts.reactors[nodeID] = pex.NewReactor( + var err error + rts.reactors[nodeID], err = pex.NewReactor( + ctx, rts.logger.With("nodeID", nodeID), rts.network.Nodes[nodeID].PeerManager, - rts.pexChannels[nodeID], + chCreator, rts.peerUpdates[nodeID], ) + require.NoError(t, err) } rts.nodes = 
append(rts.nodes, nodeID) @@ -407,17 +410,11 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { require.Len(t, rts.reactors, realNodes) t.Cleanup(func() { - for nodeID, reactor := range rts.reactors { + for _, reactor := range rts.reactors { if reactor.IsRunning() { - require.NoError(t, reactor.Stop()) + reactor.Wait() require.False(t, reactor.IsRunning()) } - rts.pexChannels[nodeID].Close() - rts.peerUpdates[nodeID].Close() - } - for _, nodeID := range rts.mocks { - rts.pexChannels[nodeID].Close() - rts.peerUpdates[nodeID].Close() } }) @@ -425,89 +422,102 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { } // starts up the pex reactors for each node -func (r *reactorTestSuite) start(t *testing.T) { +func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) { t.Helper() for _, reactor := range r.reactors { - require.NoError(t, reactor.Start()) + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) } } -func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { +func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) { t.Helper() for i := 0; i < nodes; i++ { - node := r.network.MakeNode(t, p2ptest.NodeOptions{ + node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{ MaxPeers: r.opts.MaxPeers, MaxConnected: r.opts.MaxConnected, }) r.network.Nodes[node.NodeID] = node nodeID := node.NodeID - r.pexChannels[nodeID] = node.MakeChannelNoCleanup(t, pex.ChannelDescriptor()) + r.pexChannels[nodeID] = node.MakeChannelNoCleanup(ctx, t, pex.ChannelDescriptor()) r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) - r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID]) - r.reactors[nodeID] = pex.NewReactor( + r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID]) + + chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) 
{ + return r.pexChannels[nodeID], nil + } + + var err error + r.reactors[nodeID], err = pex.NewReactor( + ctx, r.logger.With("nodeID", nodeID), r.network.Nodes[nodeID].PeerManager, - r.pexChannels[nodeID], + chCreator, r.peerUpdates[nodeID], ) + require.NoError(t, err) r.nodes = append(r.nodes, nodeID) r.total++ } } func (r *reactorTestSuite) listenFor( + ctx context.Context, t *testing.T, node types.NodeID, - conditional func(msg p2p.Envelope) bool, - assertion func(t *testing.T, msg p2p.Envelope) bool, + conditional func(msg *p2p.Envelope) bool, + assertion func(t *testing.T, msg *p2p.Envelope) bool, waitPeriod time.Duration, ) { - timesUp := time.After(waitPeriod) - for { - select { - case envelope := <-r.pexChannels[node].In: - if conditional(envelope) && assertion(t, envelope) { - return - } - case <-timesUp: - require.Fail(t, "timed out waiting for message", - "node=%v, waitPeriod=%s", node, waitPeriod) + ctx, cancel := context.WithTimeout(ctx, waitPeriod) + defer cancel() + iter := r.pexChannels[node].Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if conditional(envelope) && assertion(t, envelope) { + return } } + + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + require.Fail(t, "timed out waiting for message", + "node=%v, waitPeriod=%s", node, waitPeriod) + } + } -func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, waitPeriod time.Duration) { - r.logger.Info("Listening for request", "from", fromNode, "to", toNode) +func (r *reactorTestSuite) listenForRequest(ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration) { to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexRequestV2) + conditional := func(msg *p2p.Envelope) bool { + _, ok := msg.Message.(*p2pproto.PexRequest) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, 
&p2pproto.PexRequestV2{}, msg.Message) + assertion := func(t *testing.T, msg *p2p.Envelope) bool { + require.Equal(t, &p2pproto.PexRequest{}, msg.Message) return true } - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) pingAndlistenForNAddresses( + ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration, addresses int, ) { - r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) + t.Helper() + to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) + conditional := func(msg *p2p.Envelope) bool { + _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - m, ok := msg.Message.(*p2pproto.PexResponseV2) + assertion := func(t *testing.T, msg *p2p.Envelope) bool { + m, ok := msg.Message.(*p2pproto.PexResponse) if !ok { require.Fail(t, "expected pex response v2") return true @@ -519,64 +529,47 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( // if we didn't get the right length, we wait and send the // request again time.Sleep(300 * time.Millisecond) - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(ctx, t, toNode, fromNode) return false } - r.sendRequest(t, toNode, fromNode, true) - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.sendRequest(ctx, t, toNode, fromNode) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForResponse( - t *testing.T, - fromNode, toNode int, - waitPeriod time.Duration, - addresses []p2pproto.PexAddressV2, -) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) - to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) - r.logger.Info("message", msg, "ok", 
ok) - return ok && msg.From == from - } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message) - return true - } - r.listenFor(t, to, conditional, assertion, waitPeriod) -} - -func (r *reactorTestSuite) listenForLegacyResponse( + ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration, addresses []p2pproto.PexAddress, ) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { + conditional := func(msg *p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { + assertion := func(t *testing.T, msg *p2p.Envelope) bool { require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message) return true } - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForPeerUpdate( + ctx context.Context, t *testing.T, onNode, withNode int, status p2p.PeerStatus, waitPeriod time.Duration, ) { on, with := r.checkNodePair(t, onNode, withNode) - sub := r.network.Nodes[on].PeerManager.Subscribe() - defer sub.Close() + sub := r.network.Nodes[on].PeerManager.Subscribe(ctx) timesUp := time.After(waitPeriod) for { select { + case <-ctx.Done(): + require.Fail(t, "operation canceled") + return case peerUpdate := <-sub.Updates(): if peerUpdate.NodeID == with { require.Equal(t, status, peerUpdate.Status) @@ -591,73 +584,41 @@ func (r *reactorTestSuite) listenForPeerUpdate( } } -func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 { - addresses := make([]p2pproto.PexAddressV2, len(nodes)) - for idx, node := range nodes { - nodeID := r.nodes[node] - addresses[idx] = p2pproto.PexAddressV2{ - URL: r.network.Nodes[nodeID].NodeAddress.String(), - } 
- } - return addresses -} - -func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress { +func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress { addresses := make([]p2pproto.PexAddress, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] - nodeAddrs := r.network.Nodes[nodeID].NodeAddress - endpoints, err := nodeAddrs.Resolve(context.Background()) - require.NoError(t, err) - require.Len(t, endpoints, 1) addresses[idx] = p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoints[0].IP.String(), - Port: uint32(endpoints[0].Port), + URL: r.network.Nodes[nodeID].NodeAddress.String(), } } return addresses } -func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bool) { +func (r *reactorTestSuite) sendRequest(ctx context.Context, t *testing.T, fromNode, toNode int) { + t.Helper() to, from := r.checkNodePair(t, toNode, fromNode) - if v2 { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequestV2{}, - } - } else { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequest{}, - } - } + require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{ + To: to, + Message: &p2pproto.PexRequest{}, + })) } func (r *reactorTestSuite) sendResponse( + ctx context.Context, t *testing.T, fromNode, toNode int, withNodes []int, - v2 bool, ) { + t.Helper() from, to := r.checkNodePair(t, fromNode, toNode) - if v2 { - addrs := r.getV2AddressesFor(withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponseV2{ - Addresses: addrs, - }, - } - } else { - addrs := r.getAddressesFor(t, withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponse{ - Addresses: addrs, - }, - } - } + addrs := r.getAddressesFor(withNodes) + require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{ + To: to, + Message: &p2pproto.PexResponse{ + Addresses: addrs, + }, + })) } func (r 
*reactorTestSuite) requireNumberOfPeers( @@ -676,28 +637,27 @@ func (r *reactorTestSuite) requireNumberOfPeers( ) } -func (r *reactorTestSuite) connectAll(t *testing.T) { - r.connectN(t, r.total-1) +func (r *reactorTestSuite) connectAll(ctx context.Context, t *testing.T) { + r.connectN(ctx, t, r.total-1) } // connects all nodes with n other nodes -func (r *reactorTestSuite) connectN(t *testing.T, n int) { +func (r *reactorTestSuite) connectN(ctx context.Context, t *testing.T, n int) { if n >= r.total { require.Fail(t, "connectN: n must be less than the size of the network - 1") } for i := 0; i < r.total; i++ { for j := 0; j < n; j++ { - r.connectPeers(t, i, (i+j+1)%r.total) + r.connectPeers(ctx, t, i, (i+j+1)%r.total) } } } // connects node1 to node2 -func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int) { +func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourceNode, targetNode int) { t.Helper() node1, node2 := r.checkNodePair(t, sourceNode, targetNode) - r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode) n1 := r.network.Nodes[node1] if n1 == nil { @@ -711,22 +671,16 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int return } - sourceSub := n1.PeerManager.Subscribe() - defer sourceSub.Close() - targetSub := n2.PeerManager.Subscribe() - defer targetSub.Close() + sourceSub := n1.PeerManager.Subscribe(ctx) + targetSub := n2.PeerManager.Subscribe(ctx) sourceAddress := n1.NodeAddress - r.logger.Debug("source address", "address", sourceAddress) targetAddress := n2.NodeAddress - r.logger.Debug("target address", "address", targetAddress) added, err := n1.PeerManager.Add(targetAddress) require.NoError(t, err) if !added { - r.logger.Debug("nodes already know about one another", - "sourceNode", sourceNode, "targetNode", targetNode) return } @@ -736,19 +690,16 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int NodeID: node1, 
Status: p2p.PeerStatusUp, }, peerUpdate) - r.logger.Debug("target connected with source") case <-time.After(2 * time.Second): require.Fail(t, "timed out waiting for peer", "%v accepting %v", targetNode, sourceNode) } - select { case peerUpdate := <-sourceSub.Updates(): require.Equal(t, p2p.PeerUpdate{ NodeID: node2, Status: p2p.PeerStatusUp, }, peerUpdate) - r.logger.Debug("source connected with target") case <-time.After(2 * time.Second): require.Fail(t, "timed out waiting for peer", "%v dialing %v", sourceNode, targetNode) @@ -759,32 +710,6 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int require.True(t, added) } -// nolint: unused -func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress { - var addresses []p2pproto.PexAddress - for _, i := range nodeIndices { - if i < len(r.nodes) { - require.Fail(t, "index for pex address is greater than number of nodes") - } - nodeAddrs := r.network.Nodes[r.nodes[i]].NodeAddress - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - endpoints, err := nodeAddrs.Resolve(ctx) - cancel() - require.NoError(t, err) - for _, endpoint := range endpoints { - if endpoint.IP != nil { - addresses = append(addresses, p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - - } - return addresses -} - func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) { require.NotEqual(t, first, second) require.Less(t, first, r.total) diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index e0e812cf5e..ebfa2885bb 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -2,6 +2,7 @@ package p2p import ( "container/heap" + "context" "sort" "strconv" "time" @@ -140,8 +141,8 @@ func (s *pqScheduler) closed() <-chan struct{} { } // start starts non-blocking process that starts the priority queue scheduler. 
-func (s *pqScheduler) start() { - go s.process() +func (s *pqScheduler) start(ctx context.Context) { + go s.process(ctx) } // process starts a block process where we listen for Envelopes to enqueue. If @@ -153,17 +154,17 @@ func (s *pqScheduler) start() { // // After we attempt to enqueue the incoming Envelope, if the priority queue is // non-empty, we pop the top Envelope and send it on the dequeueCh. -func (s *pqScheduler) process() { +func (s *pqScheduler) process(ctx context.Context) { defer s.done.Close() for { select { case e := <-s.enqueueCh: - chIDStr := strconv.Itoa(int(e.channelID)) + chIDStr := strconv.Itoa(int(e.ChannelID)) pqEnv := &pqEnvelope{ envelope: e, size: uint(proto.Size(e.Message)), - priority: s.chPriorities[e.channelID], + priority: s.chPriorities[e.ChannelID], timestamp: time.Now().UTC(), } @@ -202,7 +203,7 @@ func (s *pqScheduler) process() { if tmpSize+pqEnv.size <= s.capacity { canEnqueue = true } else { - pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.channelID)) + pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.ChannelID)) s.metrics.PeerQueueDroppedMsgs.With("ch_id", pqEnvTmpChIDStr).Add(1) s.logger.Debug( "dropped envelope", @@ -257,7 +258,8 @@ func (s *pqScheduler) process() { s.metrics.PeerSendBytesTotal.With( "chID", chIDStr, - "peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) + "peer_id", string(pqEnv.envelope.To), + "message_type", s.metrics.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) s.metrics.PeerPendingSendBytes.With( "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { @@ -266,7 +268,8 @@ func (s *pqScheduler) process() { return } } - + case <-ctx.Done(): + return case <-s.closer.Done(): return } @@ -274,7 +277,7 @@ func (s *pqScheduler) process() { } func (s *pqScheduler) push(pqEnv *pqEnvelope) { - chIDStr := strconv.Itoa(int(pqEnv.envelope.channelID)) + chIDStr := strconv.Itoa(int(pqEnv.envelope.ChannelID)) // enqueue the incoming Envelope 
heap.Push(s.pq, pqEnv) diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index ffa7e39a8d..03841d0009 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -1,6 +1,7 @@ package p2p import ( + "context" "testing" "time" @@ -19,12 +20,15 @@ func TestCloseWhileDequeueFull(t *testing.T) { for i := 0; i < enqueueLength; i++ { pqueue.enqueue() <- Envelope{ - channelID: 0x01, + ChannelID: 0x01, Message: &testMessage{Value: "foo"}, // 5 bytes } } - go pqueue.process() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go pqueue.process(ctx) // sleep to allow context switch for process() to run time.Sleep(10 * time.Millisecond) diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 6c46946244..c60fe30a50 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,102 +21,6 @@ import ( const queueBufferDefault = 32 -const dialRandomizerIntervalMillisecond = 3000 - -// Envelope contains a message with sender/receiver routing info. -type Envelope struct { - From types.NodeID // sender (empty if outbound) - To types.NodeID // receiver (empty if inbound) - Broadcast bool // send to all connected peers (ignores To) - Message proto.Message // message payload - - // channelID is for internal Router use, set on outbound messages to inform - // the sendPeer() goroutine which transport channel to use. - // - // FIXME: If we migrate the Transport API to a byte-oriented multi-stream - // API, this will no longer be necessary since each channel will be mapped - // onto a stream during channel/peer setup. See: - // https://github.com/tendermint/spec/pull/227 - channelID ChannelID -} - -// PeerError is a peer error reported via Channel.Error. -// -// FIXME: This currently just disconnects the peer, which is too simplistic. -// For example, some errors should be logged, some should cause disconnects, -// and some should ban the peer. 
-// -// FIXME: This should probably be replaced by a more general PeerBehavior -// concept that can mark good and bad behavior and contributes to peer scoring. -// It should possibly also allow reactors to request explicit actions, e.g. -// disconnection or banning, in addition to doing this based on aggregates. -type PeerError struct { - NodeID types.NodeID - Err error -} - -// Channel is a bidirectional channel to exchange Protobuf messages with peers, -// wrapped in Envelope to specify routing info (i.e. sender/receiver). -type Channel struct { - ID ChannelID - In <-chan Envelope // inbound messages (peers to reactors) - Out chan<- Envelope // outbound messages (reactors to peers) - Error chan<- PeerError // peer error reporting - - messageType proto.Message // the channel's message type, used for unmarshaling - closeCh chan struct{} - closeOnce sync.Once -} - -// NewChannel creates a new channel. It is primarily for internal and test -// use, reactors should use Router.OpenChannel(). -func NewChannel( - id ChannelID, - messageType proto.Message, - inCh <-chan Envelope, - outCh chan<- Envelope, - errCh chan<- PeerError, -) *Channel { - return &Channel{ - ID: id, - messageType: messageType, - In: inCh, - Out: outCh, - Error: errCh, - closeCh: make(chan struct{}), - } -} - -// Close closes the channel. Future sends on Out and Error will panic. The In -// channel remains open to avoid having to synchronize Router senders, which -// should use Done() to detect channel closure instead. -func (c *Channel) Close() { - c.closeOnce.Do(func() { - close(c.closeCh) - close(c.Out) - close(c.Error) - }) -} - -// Done returns a channel that's closed when Channel.Close() is called. -func (c *Channel) Done() <-chan struct{} { - return c.closeCh -} - -// Wrapper is a Protobuf message that can contain a variety of inner messages -// (e.g. via oneof fields). 
If a Channel's message type implements Wrapper, the -// Router will automatically wrap outbound messages and unwrap inbound messages, -// such that reactors do not have to do this themselves. -type Wrapper interface { - proto.Message - - // Wrap will take a message and wrap it in this one if possible. - Wrap(proto.Message) error - - // Unwrap will unwrap the inner message contained in this message. - Unwrap() (proto.Message, error) -} - // RouterOptions specifies options for a Router. type RouterOptions struct { // ResolveTimeout is the timeout for resolving NodeAddress URLs. @@ -242,8 +146,8 @@ func (o *RouterOptions) Validate() error { // quality of service. type Router struct { *service.BaseService + logger log.Logger - logger log.Logger metrics *Metrics options RouterOptions nodeInfo types.NodeInfo @@ -251,9 +155,9 @@ type Router struct { peerManager *PeerManager chDescs []*ChannelDescriptor transports []Transport + endpoints []Endpoint connTracker connectionTracker protocolTransports map[Protocol]Transport - stopCh chan struct{} // signals Router shutdown peerMtx sync.RWMutex peerQueues map[types.NodeID]queue // outbound messages per peer for all channels @@ -273,12 +177,14 @@ type Router struct { // listening on appropriate interfaces, and will be closed by the Router when it // stops. 
func NewRouter( + ctx context.Context, logger log.Logger, metrics *Metrics, nodeInfo types.NodeInfo, privKey crypto.PrivKey, peerManager *PeerManager, transports []Transport, + endpoints []Endpoint, options RouterOptions, ) (*Router, error) { @@ -297,10 +203,10 @@ func NewRouter( ), chDescs: make([]*ChannelDescriptor, 0), transports: transports, + endpoints: endpoints, protocolTransports: map[Protocol]Transport{}, peerManager: peerManager, options: options, - stopCh: make(chan struct{}), channelQueues: map[ChannelID]queue{}, channelMessages: map[ChannelID]proto.Message{}, peerQueues: map[types.NodeID]queue{}, @@ -309,7 +215,7 @@ func NewRouter( router.BaseService = service.NewBaseService(logger, "router", router) - qf, err := router.createQueueFactory() + qf, err := router.createQueueFactory(ctx) if err != nil { return nil, err } @@ -327,7 +233,7 @@ func NewRouter( return router, nil } -func (r *Router) createQueueFactory() (func(int) queue, error) { +func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error) { switch r.options.QueueType { case queueTypeFifo: return newFIFOQueue, nil @@ -339,7 +245,7 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { } q := newPQScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) - q.start() + q.start(ctx) return q }, nil @@ -348,13 +254,18 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { } } +// ChannelCreator allows routers to construct their own channels, +// either by receiving a reference to Router.OpenChannel or using some +// kind shim for testing purposes. +type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error) + // OpenChannel opens a new channel for the given message type. The caller must // close the channel when done, before stopping the Router. 
messageType is the // type of message passed through the channel (used for unmarshaling), which can // implement Wrapper to automatically (un)wrap multiple message types in a // wrapper message. The caller may provide a size to make the channel buffered, // which internally makes the inbound, outbound, and error channel buffered. -func (r *Router) OpenChannel(chDesc *ChannelDescriptor) (*Channel, error) { +func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*Channel, error) { r.channelMtx.Lock() defer r.channelMtx.Unlock() @@ -395,7 +306,7 @@ func (r *Router) OpenChannel(chDesc *ChannelDescriptor) (*Channel, error) { queue.close() }() - r.routeChannel(id, outCh, errCh, wrapper) + r.routeChannel(ctx, id, outCh, errCh, wrapper) }() return channel, nil @@ -407,6 +318,7 @@ func (r *Router) OpenChannel(chDesc *ChannelDescriptor) (*Channel, error) { // closed, or the Router is stopped. wrapper is an optional message wrapper // for messages, see Wrapper for details. func (r *Router) routeChannel( + ctx context.Context, chID ChannelID, outCh <-chan Envelope, errCh <-chan PeerError, @@ -421,13 +333,13 @@ func (r *Router) routeChannel( // Mark the envelope with the channel ID to allow sendPeer() to pass // it on to Transport.SendMessage(). 
- envelope.channelID = chID + envelope.ChannelID = chID // wrap the message in a wrapper message, if requested if wrapper != nil { msg := proto.Clone(wrapper) if err := msg.(Wrapper).Wrap(envelope.Message); err != nil { - r.Logger.Error("failed to wrap message", "channel", chID, "err", err) + r.logger.Error("failed to wrap message", "channel", chID, "err", err) continue } @@ -490,7 +402,7 @@ func (r *Router) routeChannel( case <-q.closed(): r.logger.Debug("dropping message for unconnected peer", "peer", envelope.To, "channel", chID) - case <-r.stopCh: + case <-ctx.Done(): return } } @@ -503,8 +415,7 @@ func (r *Router) routeChannel( r.logger.Error("peer error, evicting", "peer", peerError.NodeID, "err", peerError.Err) r.peerManager.Errored(peerError.NodeID, peerError.Err) - - case <-r.stopCh: + case <-ctx.Done(): return } } @@ -536,8 +447,15 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { + const ( + maxDialerInterval = 3000 + minDialerInterval = 250 + ) + // nolint:gosec // G404: Use of weak random number generator - timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond) + dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval) + + timer := time.NewTimer(dur * time.Millisecond) defer timer.Stop() select { @@ -553,11 +471,9 @@ func (r *Router) dialSleep(ctx context.Context) { // acceptPeers accepts inbound connections from peers on the given transport, // and spawns goroutines that route messages to/from them. 
-func (r *Router) acceptPeers(transport Transport) { - r.logger.Debug("starting accept routine", "transport", transport) - ctx := r.stopCtx() +func (r *Router) acceptPeers(ctx context.Context, transport Transport) { for { - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) switch err { case nil: case io.EOF: @@ -632,14 +548,11 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) { return } - r.routePeer(peerInfo.NodeID, conn, toChannelIDs(peerInfo.Channels)) + r.routePeer(ctx, peerInfo.NodeID, conn, toChannelIDs(peerInfo.Channels)) } // dialPeers maintains outbound connections to peers by dialing them. -func (r *Router) dialPeers() { - r.logger.Debug("starting dial routine") - ctx := r.stopCtx() - +func (r *Router) dialPeers(ctx context.Context) { addresses := make(chan NodeAddress) wg := &sync.WaitGroup{} @@ -670,7 +583,6 @@ LOOP: address, err := r.peerManager.DialNext(ctx) switch { case errors.Is(err, context.Canceled): - r.logger.Debug("stopping dial routine") break LOOP case err != nil: r.logger.Error("failed to find next peer to dial", "err", err) @@ -701,7 +613,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { return case err != nil: r.logger.Error("failed to dial peer", "peer", address, "err", err) - if err = r.peerManager.DialFailed(address); err != nil { + if err = r.peerManager.DialFailed(ctx, address); err != nil { r.logger.Error("failed to report dial failure", "peer", address, "err", err) } return @@ -714,7 +626,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { return case err != nil: r.logger.Error("failed to handshake with peer", "peer", address, "err", err) - if err = r.peerManager.DialFailed(address); err != nil { + if err = r.peerManager.DialFailed(ctx, address); err != nil { r.logger.Error("failed to report dial failure", "peer", address, "err", err) } conn.Close() @@ -729,7 +641,7 @@ func (r *Router) connectPeer(ctx context.Context, address 
NodeAddress) { } // routePeer (also) calls connection close - go r.routePeer(address.NodeID, conn, toChannelIDs(peerInfo.Channels)) + go r.routePeer(ctx, address.NodeID, conn, toChannelIDs(peerInfo.Channels)) } func (r *Router) getOrMakeQueue(peerID types.NodeID, channels channelIDs) queue { @@ -844,9 +756,9 @@ func (r *Router) runWithPeerMutex(fn func() error) error { // routePeer routes inbound and outbound messages between a peer and the reactor // channels. It will close the given connection and send queue when done, or if // they are closed elsewhere it will cause this method to shut down and return. -func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels channelIDs) { +func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connection, channels channelIDs) { r.metrics.Peers.Add(1) - r.peerManager.Ready(peerID) + r.peerManager.Ready(ctx, peerID) sendQueue := r.getOrMakeQueue(peerID, channels) defer func() { @@ -857,7 +769,7 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels channe sendQueue.close() - r.peerManager.Disconnected(peerID) + r.peerManager.Disconnected(ctx, peerID) r.metrics.Peers.Add(-1) }() @@ -866,27 +778,46 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels channe errCh := make(chan error, 2) go func() { - errCh <- r.receivePeer(peerID, conn) + select { + case errCh <- r.receivePeer(ctx, peerID, conn): + case <-ctx.Done(): + } }() go func() { - errCh <- r.sendPeer(peerID, conn, sendQueue) + select { + case errCh <- r.sendPeer(ctx, peerID, conn, sendQueue): + case <-ctx.Done(): + } }() - err := <-errCh + var err error + select { + case err = <-errCh: + case <-ctx.Done(): + } + _ = conn.Close() sendQueue.close() - if e := <-errCh; err == nil { + select { + case <-ctx.Done(): + case e := <-errCh: // The first err was nil, so we update it with the second err, which may // or may not be nil. 
+ if err == nil { + err = e + } + } + + // if the context was canceled + if e := ctx.Err(); err == nil && e != nil { err = e } switch err { case nil, io.EOF: r.logger.Info("peer disconnected", "peer", peerID, "endpoint", conn) - default: r.logger.Error("peer failure", "peer", peerID, "endpoint", conn, "err", err) } @@ -894,9 +825,9 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels channe // receivePeer receives inbound messages from a peer, deserializes them and // passes them on to the appropriate channel. -func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { +func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Connection) error { for { - chID, bz, err := conn.ReceiveMessage() + chID, bz, err := conn.ReceiveMessage(ctx) if err != nil { return err } @@ -928,24 +859,25 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { start := time.Now().UTC() select { - case queue.enqueue() <- Envelope{From: peerID, Message: msg}: + case queue.enqueue() <- Envelope{From: peerID, Message: msg, ChannelID: chID}: r.metrics.PeerReceiveBytesTotal.With( "chID", fmt.Sprint(chID), - "peer_id", string(peerID)).Add(float64(proto.Size(msg))) + "peer_id", string(peerID), + "message_type", r.metrics.ValueToMetricLabel(msg)).Add(float64(proto.Size(msg))) r.metrics.RouterChannelQueueSend.Observe(time.Since(start).Seconds()) r.logger.Debug("received message", "peer", peerID, "message", msg) case <-queue.closed(): r.logger.Debug("channel closed, dropping message", "peer", peerID, "channel", chID) - case <-r.stopCh: + case <-ctx.Done(): return nil } } } // sendPeer sends queued messages to a peer. 
-func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) error { +func (r *Router) sendPeer(ctx context.Context, peerID types.NodeID, conn Connection, peerQueue queue) error { for { start := time.Now().UTC() @@ -963,7 +895,7 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) continue } - if err = conn.SendMessage(envelope.channelID, bz); err != nil { + if err = conn.SendMessage(ctx, envelope.ChannelID, bz); err != nil { return err } @@ -972,25 +904,20 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) case <-peerQueue.closed(): return nil - case <-r.stopCh: + case <-ctx.Done(): return nil } } } // evictPeers evicts connected peers as requested by the peer manager. -func (r *Router) evictPeers() { - r.logger.Debug("starting evict routine") - ctx := r.stopCtx() - +func (r *Router) evictPeers(ctx context.Context) { for { peerID, err := r.peerManager.EvictNext(ctx) switch { case errors.Is(err, context.Canceled): - r.logger.Debug("stopping evict routine") return - case err != nil: r.logger.Error("failed to find next peer to evict", "err", err) return @@ -1014,19 +941,28 @@ func (r *Router) NodeInfo() types.NodeInfo { } // OnStart implements service.Service. 
-func (r *Router) OnStart() error { - r.Logger.Info( +func (r *Router) OnStart(ctx context.Context) error { + for _, transport := range r.transports { + for _, endpoint := range r.endpoints { + if err := transport.Listen(endpoint); err != nil { + return err + } + } + } + + r.logger.Info( "starting router", "node_id", r.nodeInfo.NodeID, "channels", r.nodeInfo.Channels, "listen_addr", r.nodeInfo.ListenAddr, + "transports", len(r.transports), ) - go r.dialPeers() - go r.evictPeers() + go r.dialPeers(ctx) + go r.evictPeers(ctx) for _, transport := range r.transports { - go r.acceptPeers(transport) + go r.acceptPeers(ctx, transport) } return nil @@ -1039,9 +975,6 @@ func (r *Router) OnStart() error { // here, since that would cause any reactor senders to panic, so it is the // sender's responsibility. func (r *Router) OnStop() { - // Signal router shutdown. - close(r.stopCh) - // Close transport listeners (unblocks Accept calls). for _, transport := range r.transports { if err := transport.Close(); err != nil { @@ -1065,22 +998,11 @@ func (r *Router) OnStop() { r.peerMtx.RUnlock() for _, q := range queues { + q.close() <-q.closed() } } -// stopCtx returns a new context that is canceled when the router stops. 
-func (r *Router) stopCtx() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - <-r.stopCh - cancel() - }() - - return ctx -} - type channelIDs map[ChannelID]struct{} func toChannelIDs(bytes []byte) channelIDs { diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go index b90d2a3dd9..b2a8fe1a05 100644 --- a/internal/p2p/router_init_test.go +++ b/internal/p2p/router_init_test.go @@ -1,6 +1,7 @@ package p2p import ( + "context" "os" "testing" @@ -10,6 +11,9 @@ import ( ) func TestRouter_ConstructQueueFactory(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("ValidateOptionsPopulatesDefaultQueue", func(t *testing.T) { opts := RouterOptions{} require.NoError(t, opts.Validate()) @@ -18,21 +22,21 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { t.Run("Default", func(t *testing.T) { require.Zero(t, os.Getenv("TM_P2P_QUEUE")) opts := RouterOptions{} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(ctx, log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.NoError(t, err) _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Fifo", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypeFifo} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(ctx, log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.NoError(t, err) _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Priority", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypePriority} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(ctx, log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.NoError(t, err) q, ok := r.queueFactory(1).(*pqScheduler) require.True(t, ok) @@ -40,7 +44,7 @@ func 
TestRouter_ConstructQueueFactory(t *testing.T) { }) t.Run("NonExistant", func(t *testing.T) { opts := RouterOptions{QueueType: "fast"} - _, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + _, err := NewRouter(ctx, log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, nil, opts) require.Error(t, err) require.Contains(t, err.Error(), "fast") }) @@ -48,7 +52,7 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { r := &Router{} require.Zero(t, r.options.QueueType) - fn, err := r.createQueueFactory() + fn, err := r.createQueueFactory(ctx) require.Error(t, err) require.Nil(t, fn) }) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 997f02a06f..e4d78529a3 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -27,67 +27,69 @@ import ( "github.com/tendermint/tendermint/types" ) -func echoReactor(channel *p2p.Channel) { - for { - select { - case envelope := <-channel.In: - value := envelope.Message.(*p2ptest.Message).Value - channel.Out <- p2p.Envelope{ - To: envelope.From, - Message: &p2ptest.Message{Value: value}, - } - - case <-channel.Done(): +func echoReactor(ctx context.Context, channel *p2p.Channel) { + iter := channel.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + value := envelope.Message.(*p2ptest.Message).Value + if err := channel.Send(ctx, p2p.Envelope{ + To: envelope.From, + Message: &p2ptest.Message{Value: value}, + }); err != nil { return } } } func TestRouter_Network(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel where all peers run echoReactor. 
- network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 8}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 8}) local := network.RandomNode() peers := network.Peers(local.NodeID) - channels := network.MakeChannels(t, chDesc) + channels := network.MakeChannels(ctx, t, chDesc) - network.Start(t) + network.Start(ctx, t) channel := channels[local.NodeID] for _, peer := range peers { - go echoReactor(channels[peer.NodeID]) + go echoReactor(ctx, channels[peer.NodeID]) } // Sending a message to each peer should work. for _, peer := range peers { - p2ptest.RequireSendReceive(t, channel, peer.NodeID, + p2ptest.RequireSendReceive(ctx, t, channel, peer.NodeID, &p2ptest.Message{Value: "foo"}, &p2ptest.Message{Value: "foo"}, ) } // Sending a broadcast should return back a message from all peers. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ Broadcast: true, Message: &p2ptest.Message{Value: "bar"}, }) - expect := []p2p.Envelope{} + expect := []*p2p.Envelope{} for _, peer := range peers { - expect = append(expect, p2p.Envelope{ - From: peer.NodeID, - Message: &p2ptest.Message{Value: "bar"}, + expect = append(expect, &p2p.Envelope{ + From: peer.NodeID, + ChannelID: 1, + Message: &p2ptest.Message{Value: "bar"}, }) } - p2ptest.RequireReceiveUnordered(t, channel, expect) + p2ptest.RequireReceiveUnordered(ctx, t, channel, expect) // We then submit an error for a peer, and watch it get disconnected and // then reconnected as the router retries it. 
- peerUpdates := local.MakePeerUpdatesNoRequireEmpty(t) - channel.Error <- p2p.PeerError{ + peerUpdates := local.MakePeerUpdatesNoRequireEmpty(ctx, t) + require.NoError(t, channel.SendError(ctx, p2p.PeerError{ NodeID: peers[0].NodeID, Err: errors.New("boom"), - } + })) p2ptest.RequireUpdates(t, peerUpdates, []p2p.PeerUpdate{ {NodeID: peers[0].NodeID, Status: p2p.PeerStatusDown}, {NodeID: peers[0].NodeID, Status: p2p.PeerStatusUp}, @@ -97,157 +99,172 @@ func TestRouter_Network(t *testing.T) { func TestRouter_Channel_Basic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a router with no transports (so no peers). peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, nil, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) - t.Cleanup(func() { - require.NoError(t, router.Stop()) - }) + require.NoError(t, router.Start(ctx)) + t.Cleanup(router.Wait) // Opening a channel should work. - channel, err := router.OpenChannel(chDesc) + chctx, chcancel := context.WithCancel(ctx) + defer chcancel() + + channel, err := router.OpenChannel(chctx, chDesc) require.NoError(t, err) require.Contains(t, router.NodeInfo().Channels, byte(chDesc.ID)) + require.NotNil(t, channel) // Opening the same channel again should fail. - _, err = router.OpenChannel(chDesc) + _, err = router.OpenChannel(ctx, chDesc) require.Error(t, err) // Opening a different channel should work. chDesc2 := &p2p.ChannelDescriptor{ID: 2, MessageType: &p2ptest.Message{}} - _, err = router.OpenChannel(chDesc2) + _, err = router.OpenChannel(ctx, chDesc2) require.NoError(t, err) require.Contains(t, router.NodeInfo().Channels, byte(chDesc2.ID)) // Closing the channel, then opening it again should be fine. 
- channel.Close() - time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async... + chcancel() + time.Sleep(200 * time.Millisecond) // yes yes, but Close() is async... - channel, err = router.OpenChannel(chDesc) + channel, err = router.OpenChannel(ctx, chDesc) require.NoError(t, err) // We should be able to send on the channel, even though there are no peers. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "foo"}, }) // A message to ourselves should be dropped. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ To: selfID, Message: &p2ptest.Message{Value: "self"}, }) - p2ptest.RequireEmpty(t, channel) + p2ptest.RequireEmpty(ctx, t, channel) } // Channel tests are hairy to mock, so we use an in-memory network instead. func TestRouter_Channel_SendReceive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) ids := network.NodeIDs() aID, bID, cID := ids[0], ids[1], ids[2] - channels := network.MakeChannels(t, chDesc) + channels := network.MakeChannels(ctx, t, chDesc) a, b, c := channels[aID], channels[bID], channels[cID] - otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9)) + otherChannels := network.MakeChannels(ctx, t, p2ptest.MakeChannelDesc(9)) - network.Start(t) + network.Start(ctx, t) // Sending a message a->b should work, and not send anything // further to a, b, or c. 
- p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending a nil message a->b should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: nil}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: nil}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending a different message type should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending to an unknown peer should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "a"}, }) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending without a recipient should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending to self should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Removing b and sending to it should be dropped. 
- network.Remove(t, bID) - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) - p2ptest.RequireEmpty(t, a, b, c) + network.Remove(ctx, t, bID) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // After all this, sending a message c->a should work. - p2ptest.RequireSend(t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // None of these messages should have made it onto the other channels. for _, other := range otherChannels { - p2ptest.RequireEmpty(t, other) + p2ptest.RequireEmpty(ctx, t, other) } } func TestRouter_Channel_Broadcast(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 4}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4}) ids := network.NodeIDs() aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3] - channels := network.MakeChannels(t, chDesc) + channels := network.MakeChannels(ctx, t, chDesc) a, b, c, d := channels[aID], channels[bID], channels[cID], channels[dID] - network.Start(t) + network.Start(ctx, t) // Sending a broadcast from b should work. 
- p2ptest.RequireSend(t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireEmpty(t, a, b, c, d) + p2ptest.RequireSend(ctx, t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c, d) // Removing one node from the network shouldn't prevent broadcasts from working. - network.Remove(t, dID) - p2ptest.RequireSend(t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireEmpty(t, a, b, c, d) + network.Remove(ctx, t, dID) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c, d) } func TestRouter_Channel_Wrapper(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. 
- network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 2}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 2}) ids := network.NodeIDs() aID, bID := ids[0], ids[1] @@ -259,28 +276,28 @@ func TestRouter_Channel_Wrapper(t *testing.T) { RecvMessageCapacity: 10, } - channels := network.MakeChannels(t, chDesc) + channels := network.MakeChannels(ctx, t, chDesc) a, b := channels[aID], channels[bID] - network.Start(t) + network.Start(ctx, t) // Since wrapperMessage implements p2p.Wrapper and handles Message, it // should automatically wrap and unwrap sent messages -- we prepend the // wrapper actions to the message value to signal this. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) // If we send a different message that can't be wrapped, it should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) - p2ptest.RequireEmpty(t, b) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) + p2ptest.RequireEmpty(ctx, t, b) // If we send the wrapper message itself, it should also be passed through // since WrapperMessage supports it, and should only be unwrapped at the receiver. 
- p2ptest.RequireSend(t, a, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: bID, Message: &wrapperMessage{Message: p2ptest.Message{Value: "foo"}}, }) - p2ptest.RequireReceive(t, b, p2p.Envelope{ + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{ From: aID, Message: &p2ptest.Message{Value: "unwrap:foo"}, }) @@ -313,18 +330,21 @@ func (w *wrapperMessage) Unwrap() (proto.Message, error) { func TestRouter_Channel_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) - network.Start(t) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) + network.Start(ctx, t) ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc) + channels := network.MakeChannels(ctx, t, chDesc) a := channels[aID] // Erroring b should cause it to be disconnected. It will reconnect shortly after. - sub := network.Nodes[aID].MakePeerUpdates(t) - p2ptest.RequireError(t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) + sub := network.Nodes[aID].MakePeerUpdates(ctx, t) + p2ptest.RequireError(ctx, t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) p2ptest.RequireUpdates(t, sub, []p2p.PeerUpdate{ {NodeID: bID, Status: p2p.PeerStatusDown}, {NodeID: bID, Status: p2p.PeerStatusUp}, @@ -352,9 +372,16 @@ func TestRouter_AcceptPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Set up a mock transport that handshakes. 
@@ -363,40 +390,38 @@ func TestRouter_AcceptPeers(t *testing.T) { mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil) + mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil).Maybe() mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) if tc.ok { - // without the sleep after RequireUpdate this method isn't - // always called. Consider making this call optional. - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() } mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil).Maybe() + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -406,7 +431,6 @@ func TestRouter_AcceptPeers(t *testing.T) { // force a context switch so that the // connection is handled. 
time.Sleep(time.Millisecond) - sub.Close() } else { select { case <-closer.Done(): @@ -425,31 +449,35 @@ func TestRouter_AcceptPeers(t *testing.T) { func TestRouter_AcceptPeers_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns an error, which should prevent // the router from calling Accept again. mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Accept").Once().Return(nil, errors.New("boom")) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, errors.New("boom")) mockTransport.On("Close").Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) require.NoError(t, router.Stop()) @@ -459,31 +487,35 @@ func TestRouter_AcceptPeers_Error(t *testing.T) { func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns io.EOF once, which should prevent // the router from calling Accept again. mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) mockTransport.On("Close").Return(nil) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) require.NoError(t, router.Stop()) @@ -493,6 +525,9 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns a connection that blocks during the // handshake. It should be able to accept several of these in parallel, i.e. // a single connection can't halt other connections being accepted. @@ -510,31 +545,32 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Times(3).Run(func(_ mock.Arguments) { + mockTransport.On("Accept", mock.Anything).Times(3).Run(func(_ mock.Arguments) { acceptCh <- true }).Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { return len(acceptCh) == 3 - }, time.Second, 10*time.Millisecond) + }, time.Second, 10*time.Millisecond, "num", len(acceptCh)) close(closeCh) time.Sleep(100 * time.Millisecond) @@ -569,10 +605,16 @@ func TestRouter_DialPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(bctx) + defer cancel() address := p2p.NodeAddress{Protocol: "mock", NodeID: tc.dialID} endpoint := p2p.Endpoint{Protocol: "mock", Path: string(tc.dialID)} @@ -584,19 +626,17 @@ func TestRouter_DialPeers(t *testing.T) { if tc.dialErr == nil { mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil) + mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil).Maybe() } if tc.ok { - // without the sleep after RequireUpdate this method isn't - // always called. Consider making this call optional. 
- mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() } mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil).Maybe() + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) if tc.dialErr == nil { mockTransport.On("Dial", mock.Anything, endpoint).Once().Return(mockConnection, nil) // This handles the retry when a dialed connection gets closed after ReceiveMessage @@ -611,25 +651,25 @@ func TestRouter_DialPeers(t *testing.T) { // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(address) require.NoError(t, err) require.True(t, added) - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -639,7 +679,6 @@ func TestRouter_DialPeers(t *testing.T) { // force a context switch so that the // connection is handled. 
time.Sleep(time.Millisecond) - sub.Close() } else { select { case <-closer.Done(): @@ -658,6 +697,9 @@ func TestRouter_DialPeers(t *testing.T) { func TestRouter_DialPeers_Parallel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -677,7 +719,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) for _, address := range []p2p.NodeAddress{a, b, c} { endpoint := p2p.Endpoint{Protocol: address.Protocol, Path: string(address.NodeID)} mockTransport.On("Dial", mock.Anything, endpoint).Run(func(_ mock.Arguments) { @@ -688,7 +730,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(a) require.NoError(t, err) @@ -703,12 +744,14 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { require.True(t, added) router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{ DialSleep: func(_ context.Context) {}, NumConcurrentDials: func() int { @@ -722,7 +765,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { @@ -743,6 +786,9 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { func TestRouter_EvictPeers(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that we can evict. closeCh := make(chan time.Time) closeOnce := sync.Once{} @@ -751,7 +797,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). 
Return(peerInfo, peerKey.PubKey(), nil) - mockConnection.On("ReceiveMessage").WaitUntil(closeCh).Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).WaitUntil(closeCh).Return(chID, nil, io.EOF) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Run(func(_ mock.Arguments) { closeOnce.Do(func() { @@ -763,28 +809,28 @@ func TestRouter_EvictPeers(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) // Wait for the mock peer to connect, then evict it by reporting an error. 
p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -798,7 +844,6 @@ func TestRouter_EvictPeers(t *testing.T) { NodeID: peerInfo.NodeID, Status: p2p.PeerStatusDown, }) - sub.Close() require.NoError(t, router.Stop()) mockTransport.AssertExpectations(t) @@ -807,6 +852,8 @@ func TestRouter_EvictPeers(t *testing.T) { func TestRouter_ChannelCompatability(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() incompatiblePeer := types.NodeInfo{ NodeID: peerID, @@ -827,25 +874,26 @@ func TestRouter_ChannelCompatability(t *testing.T) { mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(1 * time.Second) require.NoError(t, router.Stop()) require.Empty(t, peerManager.Peers()) @@ -856,6 +904,8 @@ func TestRouter_ChannelCompatability(t *testing.T) { func TestRouter_DontSendOnInvalidChannel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() peer := types.NodeInfo{ NodeID: peerID, @@ -871,48 +921,48 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { Return(peer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF) mockTransport := &mocks.Transport{} mockTransport.On("AddChannelDescriptors", mock.Anything).Return() mockTransport.On("String").Maybe().Return("mock") mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( + ctx, log.TestingLogger(), p2p.NopMetrics(), selfInfo, selfKey, peerManager, []p2p.Transport{mockTransport}, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ NodeID: peerInfo.NodeID, Status: p2p.PeerStatusUp, }) - channel, err := router.OpenChannel(chDesc) + channel, err := router.OpenChannel(ctx, chDesc) require.NoError(t, err) - channel.Out <- p2p.Envelope{ + require.NoError(t, channel.Send(ctx, p2p.Envelope{ To: peer.NodeID, Message: &p2ptest.Message{Value: "Hi"}, - } + })) require.NoError(t, router.Stop()) mockTransport.AssertExpectations(t) diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index e78906362c..041bbda3af 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -23,6 +23,9 @@ type Protocol string // Transport is a connection-oriented mechanism for exchanging data with a peer. type Transport interface { + // Listen starts the transport on the specified endpoint. + Listen(Endpoint) error + // Protocols returns the protocols supported by the transport. The Router // uses this to pick a transport for an Endpoint. Protocols() []Protocol @@ -36,7 +39,7 @@ type Transport interface { // Accept waits for the next inbound connection on a listening endpoint, blocking // until either a connection is available or the transport is closed. On closure, // io.EOF is returned and further Accept calls are futile. - Accept() (Connection, error) + Accept(context.Context) (Connection, error) // Dial creates an outbound connection to an endpoint. 
Dial(context.Context, Endpoint) (Connection, error) @@ -82,10 +85,10 @@ type Connection interface { // ReceiveMessage returns the next message received on the connection, // blocking until one is available. Returns io.EOF if closed. - ReceiveMessage() (ChannelID, []byte, error) + ReceiveMessage(context.Context) (ChannelID, []byte, error) // SendMessage sends a message on the connection. Returns io.EOF if closed. - SendMessage(ChannelID, []byte) error + SendMessage(context.Context, ChannelID, []byte) error // LocalEndpoint returns the local endpoint for the connection. LocalEndpoint() Endpoint diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 3e0281c39d..46227ff8fc 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -44,10 +44,10 @@ type MConnTransport struct { options MConnTransportOptions mConnConfig conn.MConnConfig channelDescs []*ChannelDescriptor - closeCh chan struct{} - closeOnce sync.Once - listener net.Listener + closeOnce sync.Once + doneCh chan struct{} + listener net.Listener } // NewMConnTransport sets up a new MConnection transport. This uses the @@ -63,7 +63,7 @@ func NewMConnTransport( logger: logger, options: options, mConnConfig: mConnConfig, - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), channelDescs: channelDescs, } } @@ -84,10 +84,11 @@ func (m *MConnTransport) Endpoints() []Endpoint { return []Endpoint{} } select { - case <-m.closeCh: + case <-m.doneCh: return []Endpoint{} default: } + endpoint := Endpoint{ Protocol: MConnProtocol, } @@ -132,7 +133,7 @@ func (m *MConnTransport) Listen(endpoint Endpoint) error { } // Accept implements Transport. 
-func (m *MConnTransport) Accept() (Connection, error) { +func (m *MConnTransport) Accept(ctx context.Context) (Connection, error) { if m.listener == nil { return nil, errors.New("transport is not listening") } @@ -140,7 +141,9 @@ func (m *MConnTransport) Accept() (Connection, error) { tcpConn, err := m.listener.Accept() if err != nil { select { - case <-m.closeCh: + case <-ctx.Done(): + return nil, io.EOF + case <-m.doneCh: return nil, io.EOF default: return nil, err @@ -178,7 +181,7 @@ func (m *MConnTransport) Dial(ctx context.Context, endpoint Endpoint) (Connectio func (m *MConnTransport) Close() error { var err error m.closeOnce.Do(func() { - close(m.closeCh) // must be closed first, to handle error in Accept() + close(m.doneCh) if m.listener != nil { err = m.listener.Close() } @@ -222,7 +225,7 @@ type mConnConnection struct { channelDescs []*ChannelDescriptor receiveCh chan mConnMessage errorCh chan error - closeCh chan struct{} + doneCh chan struct{} closeOnce sync.Once mconn *conn.MConnection // set during Handshake() @@ -248,7 +251,7 @@ func newMConnConnection( channelDescs: channelDescs, receiveCh: make(chan mConnMessage), errorCh: make(chan error, 1), // buffered to avoid onError leak - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), } } @@ -277,7 +280,12 @@ func (c *mConnConnection) Handshake( }() var err error mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey) - errCh <- err + + select { + case errCh <- err: + case <-ctx.Done(): + } + }() select { @@ -290,8 +298,7 @@ func (c *mConnConnection) Handshake( return types.NodeInfo{}, nil, err } c.mconn = mconn - c.logger = mconn.Logger - if err = c.mconn.Start(); err != nil { + if err = c.mconn.Start(ctx); err != nil { return types.NodeInfo{}, nil, err } return peerInfo, peerKey, nil @@ -315,49 +322,67 @@ func (c *mConnConnection) handshake( return nil, types.NodeInfo{}, nil, err } + wg := &sync.WaitGroup{} var pbPeerInfo p2pproto.NodeInfo errCh := make(chan error, 2) + wg.Add(1) 
go func() { + defer wg.Done() _, err := protoio.NewDelimitedWriter(secretConn).WriteMsg(nodeInfo.ToProto()) - errCh <- err + select { + case errCh <- err: + case <-ctx.Done(): + } + }() + wg.Add(1) go func() { + defer wg.Done() _, err := protoio.NewDelimitedReader(secretConn, types.MaxNodeInfoSize()).ReadMsg(&pbPeerInfo) - errCh <- err - }() - for i := 0; i < cap(errCh); i++ { - if err = <-errCh; err != nil { - return nil, types.NodeInfo{}, nil, err + select { + case errCh <- err: + case <-ctx.Done(): } + }() + + wg.Wait() + + if err, ok := <-errCh; ok && err != nil { + return nil, types.NodeInfo{}, nil, err + } + + if err := ctx.Err(); err != nil { + return nil, types.NodeInfo{}, nil, err } + peerInfo, err := types.NodeInfoFromProto(&pbPeerInfo) if err != nil { return nil, types.NodeInfo{}, nil, err } mconn := conn.NewMConnectionWithConfig( + c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID)), secretConn, c.channelDescs, c.onReceive, c.onError, c.mConnConfig, ) - mconn.SetLogger(c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID))) return mconn, peerInfo, secretConn.RemotePubKey(), nil } // onReceive is a callback for MConnection received messages. -func (c *mConnConnection) onReceive(chID ChannelID, payload []byte) { +func (c *mConnConnection) onReceive(ctx context.Context, chID ChannelID, payload []byte) { select { case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: - case <-c.closeCh: + case <-ctx.Done(): } } // onError is a callback for MConnection errors. The error is passed via errorCh // to ReceiveMessage (but not SendMessage, for legacy P2P stack behavior). 
-func (c *mConnConnection) onError(e interface{}) { +func (c *mConnConnection) onError(ctx context.Context, e interface{}) { err, ok := e.(error) if !ok { err = fmt.Errorf("%v", err) @@ -367,7 +392,7 @@ func (c *mConnConnection) onError(e interface{}) { _ = c.Close() select { case c.errorCh <- err: - case <-c.closeCh: + case <-ctx.Done(): } } @@ -377,14 +402,14 @@ func (c *mConnConnection) String() string { } // SendMessage implements Connection. -func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { +func (c *mConnConnection) SendMessage(ctx context.Context, chID ChannelID, msg []byte) error { if chID > math.MaxUint8 { return fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) } select { case err := <-c.errorCh: return err - case <-c.closeCh: + case <-ctx.Done(): return io.EOF default: if ok := c.mconn.Send(chID, msg); !ok { @@ -396,11 +421,13 @@ func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { } // ReceiveMessage implements Connection. 
-func (c *mConnConnection) ReceiveMessage() (ChannelID, []byte, error) { +func (c *mConnConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { select { case err := <-c.errorCh: return 0, nil, err - case <-c.closeCh: + case <-c.doneCh: + return 0, nil, io.EOF + case <-ctx.Done(): return 0, nil, io.EOF case msg := <-c.receiveCh: return msg.channelID, msg.payload, nil @@ -440,7 +467,7 @@ func (c *mConnConnection) Close() error { } else { err = c.conn.Close() } - close(c.closeCh) + close(c.doneCh) }) return err } diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index d334381096..0851fe0e21 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "io" "net" "testing" @@ -51,13 +52,18 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { t.Cleanup(func() { _ = transport.Close() }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, err := transport.Accept() + _, err := transport.Accept(ctx) require.Error(t, err) require.NotEqual(t, io.EOF, err) // io.EOF should be returned after Close() } func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), @@ -81,7 +87,7 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { acceptCh := make(chan p2p.Connection, 10) go func() { for { - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) if err != nil { return } @@ -124,6 +130,9 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { } func TestMConnTransport_Listen(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testcases := []struct { endpoint p2p.Endpoint ok bool @@ -145,6 +154,9 @@ func TestMConnTransport_Listen(t *testing.T) { 
t.Run(tc.endpoint.String(), func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel = context.WithCancel(ctx) + defer cancel() + transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), @@ -185,12 +197,15 @@ func TestMConnTransport_Listen(t *testing.T) { go func() { // Dialing the endpoint should work. var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerConn, err = transport.Dial(ctx, endpoint) require.NoError(t, err) close(dialedChan) }() - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) require.NoError(t, err) _ = conn.Close() <-dialedChan @@ -199,7 +214,7 @@ func TestMConnTransport_Listen(t *testing.T) { require.NoError(t, peerConn.Close()) // try to read from the connection should error - _, _, err = peerConn.ReceiveMessage() + _, _, err = peerConn.ReceiveMessage(ctx) require.Error(t, err) // Trying to listen again should error. diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index b4161ecd65..27b9e77e11 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -94,9 +94,7 @@ type MemoryTransport struct { nodeID types.NodeID bufferSize int - acceptCh chan *MemoryConnection - closeCh chan struct{} - closeOnce sync.Once + acceptCh chan *MemoryConnection } // newMemoryTransport creates a new MemoryTransport. This is for internal use by @@ -108,7 +106,6 @@ func newMemoryTransport(network *MemoryNetwork, nodeID types.NodeID) *MemoryTran nodeID: nodeID, bufferSize: network.bufferSize, acceptCh: make(chan *MemoryConnection), - closeCh: make(chan struct{}), } } @@ -117,6 +114,8 @@ func (t *MemoryTransport) String() string { return string(MemoryProtocol) } +func (*MemoryTransport) Listen(Endpoint) error { return nil } + func (t *MemoryTransport) AddChannelDescriptors([]*ChannelDescriptor) {} // Protocols implements Transport. 
@@ -126,28 +125,27 @@ func (t *MemoryTransport) Protocols() []Protocol { // Endpoints implements Transport. func (t *MemoryTransport) Endpoints() []Endpoint { - select { - case <-t.closeCh: + if n := t.network.GetTransport(t.nodeID); n == nil { return []Endpoint{} - default: - return []Endpoint{{ - Protocol: MemoryProtocol, - Path: string(t.nodeID), - // An arbitrary IP and port is used in order for the pex - // reactor to be able to send addresses to one another. - IP: net.IPv4zero, - Port: 0, - }} } + + return []Endpoint{{ + Protocol: MemoryProtocol, + Path: string(t.nodeID), + // An arbitrary IP and port is used in order for the pex + // reactor to be able to send addresses to one another. + IP: net.IPv4zero, + Port: 0, + }} } // Accept implements Transport. -func (t *MemoryTransport) Accept() (Connection, error) { +func (t *MemoryTransport) Accept(ctx context.Context) (Connection, error) { select { case conn := <-t.acceptCh: t.logger.Info("accepted connection", "remote", conn.RemoteEndpoint().Path) return conn, nil - case <-t.closeCh: + case <-ctx.Done(): return nil, io.EOF } } @@ -185,20 +183,14 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti select { case peer.acceptCh <- inConn: return outConn, nil - case <-peer.closeCh: - return nil, io.EOF case <-ctx.Done(): - return nil, ctx.Err() + return nil, io.EOF } } // Close implements Transport. func (t *MemoryTransport) Close() error { t.network.RemoveTransport(t.nodeID) - t.closeOnce.Do(func() { - close(t.closeCh) - t.logger.Info("closed transport") - }) return nil } @@ -293,12 +285,14 @@ func (c *MemoryConnection) Handshake( } // ReceiveMessage implements Connection. -func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { +func (c *MemoryConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. 
select { case <-c.closer.Done(): return 0, nil, io.EOF + case <-ctx.Done(): + return 0, nil, io.EOF default: } @@ -312,12 +306,14 @@ func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { } // SendMessage implements Connection. -func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { +func (c *MemoryConnection) SendMessage(ctx context.Context, chID ChannelID, msg []byte) error { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { case <-c.closer.Done(): return io.EOF + case <-ctx.Done(): + return io.EOF default: } @@ -325,6 +321,8 @@ func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { case c.sendCh <- memoryMessage{channelID: chID, message: msg}: c.logger.Debug("sent message", "chID", chID, "msg", msg) return nil + case <-ctx.Done(): + return io.EOF case <-c.closer.Done(): return io.EOF } diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index cdfb57c70b..63ce5ad5b3 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -25,36 +25,44 @@ var testTransports = map[string]transportFactory{} // withTransports is a test helper that runs a test against all transports // registered in testTransports. -func withTransports(t *testing.T, tester func(*testing.T, transportFactory)) { +func withTransports(ctx context.Context, t *testing.T, tester func(context.Context, *testing.T, transportFactory)) { t.Helper() for name, transportFactory := range testTransports { transportFactory := transportFactory t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) - tester(t, transportFactory) + tctx, cancel := context.WithCancel(ctx) + defer cancel() + + tester(tctx, t, transportFactory) }) } } func TestTransport_AcceptClose(t *testing.T) { // Just test accept unblock on close, happy path is tested widely elsewhere. 
- withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) + opctx, opcancel := context.WithCancel(ctx) // In-progress Accept should error on concurrent close. errCh := make(chan error, 1) go func() { time.Sleep(200 * time.Millisecond) + opcancel() errCh <- a.Close() }() - _, err := a.Accept() + _, err := a.Accept(opctx) require.Error(t, err) require.Equal(t, io.EOF, err) require.NoError(t, <-errCh) // Closed transport should return error immediately. - _, err = a.Accept() + _, err = a.Accept(opctx) require.Error(t, err) require.Equal(t, io.EOF, err) }) @@ -75,7 +83,10 @@ func TestTransport_DialEndpoints(t *testing.T) { {[]byte{1, 2, 3, 4, 5}, false}, } - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) endpoints := a.Endpoints() require.NotEmpty(t, endpoints) @@ -84,7 +95,7 @@ func TestTransport_DialEndpoints(t *testing.T) { // Spawn a goroutine to simply accept any connections until closed. go func() { for { - conn, err := a.Accept() + conn, err := a.Accept(ctx) if err != nil { return } @@ -149,8 +160,11 @@ func TestTransport_DialEndpoints(t *testing.T) { } func TestTransport_Dial(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Most just tests dial failures, happy path is tested widely elsewhere. 
- withTransports(t, func(t *testing.T, makeTransport transportFactory) { + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) @@ -165,7 +179,6 @@ func TestTransport_Dial(t *testing.T) { cancel() _, err := a.Dial(cancelCtx, bEndpoint) require.Error(t, err) - require.Equal(t, err, context.Canceled) // Unavailable endpoint should error. err = b.Close() @@ -176,7 +189,7 @@ func TestTransport_Dial(t *testing.T) { // Dialing from a closed transport should still work. errCh := make(chan error, 1) go func() { - conn, err := a.Accept() + conn, err := a.Accept(ctx) if err == nil { _ = conn.Close() } @@ -190,7 +203,10 @@ func TestTransport_Dial(t *testing.T) { } func TestTransport_Endpoints(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) @@ -214,7 +230,10 @@ func TestTransport_Endpoints(t *testing.T) { } func TestTransport_Protocols(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) protocols := a.Protocols() endpoints := a.Endpoints() @@ -228,17 +247,23 @@ func TestTransport_Protocols(t *testing.T) { } func TestTransport_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) require.NotEmpty(t, a.String()) }) } func TestConnection_Handshake(t *testing.T) { - 
withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) // A handshake should pass the given keys and NodeInfo. aKey := ed25519.GenPrivKey() @@ -270,7 +295,10 @@ func TestConnection_Handshake(t *testing.T) { assert.Equal(t, aInfo, peerInfo) assert.Equal(t, aKey.PubKey(), peerKey) } - errCh <- err + select { + case errCh <- err: + case <-ctx.Done(): + } }() peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey) @@ -283,12 +311,15 @@ func TestConnection_Handshake(t *testing.T) { } func TestConnection_HandshakeCancel(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) // Handshake should error on context cancellation. - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -298,7 +329,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { _ = ba.Close() // Handshake should error on context timeout. 
- ab, ba = dialAccept(t, a, b) + ab, ba = dialAccept(ctx, t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -310,29 +341,34 @@ func TestConnection_HandshakeCancel(t *testing.T) { } func TestConnection_FlushClose(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) + ab, _ := dialAcceptHandshake(ctx, t, a, b) err := ab.Close() require.NoError(t, err) - _, _, err = ab.ReceiveMessage() + _, _, err = ab.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) - require.Equal(t, io.EOF, err) }) } func TestConnection_LocalRemoteEndpoint(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Local and remote connection endpoints correspond to each other. 
require.NotEmpty(t, ab.LocalEndpoint()) @@ -343,25 +379,28 @@ func TestConnection_LocalRemoteEndpoint(t *testing.T) { } func TestConnection_SendReceive(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Can send and receive a to b. - err := ab.SendMessage(chID, []byte("foo")) + err := ab.SendMessage(ctx, chID, []byte("foo")) require.NoError(t, err) - ch, msg, err := ba.ReceiveMessage() + ch, msg, err := ba.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, []byte("foo"), msg) require.Equal(t, chID, ch) // Can send and receive b to a. - err = ba.SendMessage(chID, []byte("bar")) + err = ba.SendMessage(ctx, chID, []byte("bar")) require.NoError(t, err) - _, msg, err = ab.ReceiveMessage() + _, msg, err = ab.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, []byte("bar"), msg) @@ -371,9 +410,9 @@ func TestConnection_SendReceive(t *testing.T) { err = b.Close() require.NoError(t, err) - err = ab.SendMessage(chID, []byte("still here")) + err = ab.SendMessage(ctx, chID, []byte("still here")) require.NoError(t, err) - ch, msg, err = ba.ReceiveMessage() + ch, msg, err = ba.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, chID, ch) require.Equal(t, []byte("still here"), msg) @@ -383,29 +422,31 @@ func TestConnection_SendReceive(t *testing.T) { err = ba.Close() require.NoError(t, err) - _, _, err = ab.ReceiveMessage() + _, _, err = ab.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - _, _, err = ba.ReceiveMessage() + _, _, err = 
ba.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - err = ba.SendMessage(chID, []byte("closed")) + err = ba.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) - require.Equal(t, io.EOF, err) }) } func TestConnection_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAccept(t, a, b) + ab, _ := dialAccept(ctx, t, a, b) require.NotEmpty(t, ab.String()) }) } @@ -552,7 +593,7 @@ func TestEndpoint_Validate(t *testing.T) { // dialAccept is a helper that dials b from a and returns both sides of the // connection. -func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAccept(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() endpoints := b.Endpoints() @@ -564,7 +605,7 @@ func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connectio acceptCh := make(chan p2p.Connection, 1) errCh := make(chan error, 1) go func() { - conn, err := b.Accept() + conn, err := b.Accept(ctx) errCh <- err acceptCh <- conn }() @@ -585,13 +626,10 @@ func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connectio // dialAcceptHandshake is a helper that dials and handshakes b from a and // returns both sides of the connection. 
-func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() - ab, ba := dialAccept(t, a, b) - - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() + ab, ba := dialAccept(ctx, t, a, b) errCh := make(chan error, 1) go func() { diff --git a/internal/p2p/trust/config.go b/internal/p2p/trust/config.go deleted file mode 100644 index 0f990a991d..0000000000 --- a/internal/p2p/trust/config.go +++ /dev/null @@ -1,55 +0,0 @@ -package trust - -import "time" - -// MetricConfig - Configures the weight functions and time intervals for the metric -type MetricConfig struct { - // Determines the percentage given to current behavior - ProportionalWeight float64 - - // Determines the percentage given to prior behavior - IntegralWeight float64 - - // The window of time that the trust metric will track events across. - // This can be set to cover many days without issue - TrackingWindow time.Duration - - // Each interval should be short for adapability. - // Less than 30 seconds is too sensitive, - // and greater than 5 minutes will make the metric numb - IntervalLength time.Duration -} - -// DefaultConfig returns a config with values that have been tested and produce desirable results -func DefaultConfig() MetricConfig { - return MetricConfig{ - ProportionalWeight: 0.4, - IntegralWeight: 0.6, - TrackingWindow: (time.Minute * 60 * 24) * 14, // 14 days. 
- IntervalLength: 1 * time.Minute, - } -} - -// Ensures that all configuration elements have valid values -func customConfig(tmc MetricConfig) MetricConfig { - config := DefaultConfig() - - // Check the config for set values, and setup appropriately - if tmc.ProportionalWeight > 0 { - config.ProportionalWeight = tmc.ProportionalWeight - } - - if tmc.IntegralWeight > 0 { - config.IntegralWeight = tmc.IntegralWeight - } - - if tmc.IntervalLength > time.Duration(0) { - config.IntervalLength = tmc.IntervalLength - } - - if tmc.TrackingWindow > time.Duration(0) && - tmc.TrackingWindow >= config.IntervalLength { - config.TrackingWindow = tmc.TrackingWindow - } - return config -} diff --git a/internal/p2p/trust/metric.go b/internal/p2p/trust/metric.go deleted file mode 100644 index aa0ff52986..0000000000 --- a/internal/p2p/trust/metric.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
- -package trust - -import ( - "math" - "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" -) - -//--------------------------------------------------------------------------------------- - -const ( - // The weight applied to the derivative when current behavior is >= previous behavior - defaultDerivativeGamma1 = 0 - - // The weight applied to the derivative when current behavior is less than previous behavior - defaultDerivativeGamma2 = 1.0 - - // The weight applied to history data values when calculating the history value - defaultHistoryDataWeight = 0.8 -) - -// MetricHistoryJSON - history data necessary to save the trust metric -type MetricHistoryJSON struct { - NumIntervals int `json:"intervals"` - History []float64 `json:"history"` -} - -// Metric - keeps track of peer reliability -// See tendermint/docs/architecture/adr-006-trust-metric.md for details -type Metric struct { - service.BaseService - - // Mutex that protects the metric from concurrent access - mtx tmsync.Mutex - - // Determines the percentage given to current behavior - proportionalWeight float64 - - // Determines the percentage given to prior behavior - integralWeight float64 - - // Count of how many time intervals this metric has been tracking - numIntervals int - - // Size of the time interval window for this trust metric - maxIntervals int - - // The time duration for a single time interval - intervalLen time.Duration - - // Stores the trust history data for this metric - history []float64 - - // Weights applied to the history data when calculating the history value - historyWeights []float64 - - // The sum of the history weights used when calculating the history value - historyWeightSum float64 - - // The current number of history data elements - historySize int - - // The maximum number of history data elements - historyMaxSize int - - // The calculated history value for the current time interval - historyValue float64 - 
- // The number of recorded good and bad events for the current time interval - bad, good float64 - - // While true, history data is not modified - paused bool - - // Used during testing in order to control the passing of time intervals - testTicker MetricTicker -} - -// NewMetric returns a trust metric with the default configuration. -// Use Start to begin tracking the quality of peer behavior over time -func NewMetric() *Metric { - return NewMetricWithConfig(DefaultConfig()) -} - -// NewMetricWithConfig returns a trust metric with a custom configuration. -// Use Start to begin tracking the quality of peer behavior over time -func NewMetricWithConfig(tmc MetricConfig) *Metric { - tm := new(Metric) - config := customConfig(tmc) - - // Setup using the configuration values - tm.proportionalWeight = config.ProportionalWeight - tm.integralWeight = config.IntegralWeight - tm.intervalLen = config.IntervalLength - // The maximum number of time intervals is the tracking window / interval length - tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen) - // The history size will be determined by the maximum number of time intervals - tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1 - // This metric has a perfect history so far - tm.historyValue = 1.0 - - tm.BaseService = *service.NewBaseService(nil, "Metric", tm) - return tm -} - -// OnStart implements Service -func (tm *Metric) OnStart() error { - if err := tm.BaseService.OnStart(); err != nil { - return err - } - go tm.processRequests() - return nil -} - -// OnStop implements Service -// Nothing to do since the goroutine shuts down by itself via BaseService.Quit() -func (tm *Metric) OnStop() {} - -// Returns a snapshot of the trust metric history data -func (tm *Metric) HistoryJSON() MetricHistoryJSON { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return MetricHistoryJSON{ - NumIntervals: tm.numIntervals, - History: tm.history, - } -} - -// Instantiates a trust metric by loading the history data for a 
single peer. -// This is called only once and only right after creation, which is why the -// lock is not held while accessing the trust metric struct members -func (tm *Metric) Init(hist MetricHistoryJSON) { - // Restore the number of time intervals we have previously tracked - if hist.NumIntervals > tm.maxIntervals { - hist.NumIntervals = tm.maxIntervals - } - tm.numIntervals = hist.NumIntervals - // Restore the history and its current size - if len(hist.History) > tm.historyMaxSize { - // Keep the history no larger than historyMaxSize - last := len(hist.History) - tm.historyMaxSize - hist.History = hist.History[last:] - } - tm.history = hist.History - tm.historySize = len(tm.history) - // Create the history weight values and weight sum - for i := 1; i <= tm.numIntervals; i++ { - x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight - tm.historyWeights = append(tm.historyWeights, x) - } - - for _, v := range tm.historyWeights { - tm.historyWeightSum += v - } - // Calculate the history value based on the loaded history data - tm.historyValue = tm.calcHistoryValue() -} - -// Pause tells the metric to pause recording data over time intervals. 
-// All method calls that indicate events will unpause the metric -func (tm *Metric) Pause() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - // Pause the metric for now - tm.paused = true -} - -// BadEvents indicates that an undesirable event(s) took place -func (tm *Metric) BadEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.bad += float64(num) -} - -// GoodEvents indicates that a desirable event(s) took place -func (tm *Metric) GoodEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.good += float64(num) -} - -// TrustValue gets the dependable trust value; always between 0 and 1 -func (tm *Metric) TrustValue() float64 { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return tm.calcTrustValue() -} - -// TrustScore gets a score based on the trust value always between 0 and 100 -func (tm *Metric) TrustScore() int { - score := tm.TrustValue() * 100 - - return int(math.Floor(score)) -} - -// NextTimeInterval saves current time interval data and prepares for the following interval -func (tm *Metric) NextTimeInterval() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - if tm.paused { - // Do not prepare for the next time interval while paused - return - } - - // Add the current trust value to the history data - newHist := tm.calcTrustValue() - tm.history = append(tm.history, newHist) - - // Update history and interval counters - if tm.historySize < tm.historyMaxSize { - tm.historySize++ - } else { - // Keep the history no larger than historyMaxSize - last := len(tm.history) - tm.historyMaxSize - tm.history = tm.history[last:] - } - - if tm.numIntervals < tm.maxIntervals { - tm.numIntervals++ - // Add the optimistic weight for the new time interval - wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals)) - tm.historyWeights = append(tm.historyWeights, wk) - tm.historyWeightSum += wk - } - - // Update the history data using Faded Memories - tm.updateFadedMemory() - // Calculate the history value for the upcoming 
time interval - tm.historyValue = tm.calcHistoryValue() - tm.good = 0 - tm.bad = 0 -} - -// SetTicker allows a TestTicker to be provided that will manually control -// the passing of time from the perspective of the Metric. -// The ticker must be set before Start is called on the metric -func (tm *Metric) SetTicker(ticker MetricTicker) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.testTicker = ticker -} - -// Copy returns a new trust metric with members containing the same values -func (tm *Metric) Copy() *Metric { - if tm == nil { - return nil - } - - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return &Metric{ - proportionalWeight: tm.proportionalWeight, - integralWeight: tm.integralWeight, - numIntervals: tm.numIntervals, - maxIntervals: tm.maxIntervals, - intervalLen: tm.intervalLen, - history: tm.history, - historyWeights: tm.historyWeights, - historyWeightSum: tm.historyWeightSum, - historySize: tm.historySize, - historyMaxSize: tm.historyMaxSize, - historyValue: tm.historyValue, - good: tm.good, - bad: tm.bad, - paused: tm.paused, - } - -} - -/* Private methods */ - -// This method is for a goroutine that handles all requests on the metric -func (tm *Metric) processRequests() { - t := tm.testTicker - if t == nil { - // No test ticker was provided, so we create a normal ticker - t = NewTicker(tm.intervalLen) - } - defer t.Stop() - // Obtain the raw channel - tick := t.GetChannel() -loop: - for { - select { - case <-tick: - tm.NextTimeInterval() - case <-tm.Quit(): - // Stop all further tracking for this metric - break loop - } - } -} - -// Wakes the trust metric up if it is currently paused -// This method needs to be called with the mutex locked -func (tm *Metric) unpause() { - // Check if this is the first experience with - // what we are tracking since being paused - if tm.paused { - tm.good = 0 - tm.bad = 0 - // New events cause us to unpause the metric - tm.paused = false - } -} - -// Calculates the trust value for the request processing -func (tm 
*Metric) calcTrustValue() float64 { - weightedP := tm.proportionalWeight * tm.proportionalValue() - weightedI := tm.integralWeight * tm.historyValue - weightedD := tm.weightedDerivative() - - tv := weightedP + weightedI + weightedD - // Do not return a negative value. - if tv < 0 { - tv = 0 - } - return tv -} - -// Calculates the current score for good/bad experiences -func (tm *Metric) proportionalValue() float64 { - value := 1.0 - - total := tm.good + tm.bad - if total > 0 { - value = tm.good / total - } - return value -} - -// Strengthens the derivative component when the change is negative -func (tm *Metric) weightedDerivative() float64 { - var weight float64 = defaultDerivativeGamma1 - - d := tm.derivativeValue() - if d < 0 { - weight = defaultDerivativeGamma2 - } - return weight * d -} - -// Calculates the derivative component -func (tm *Metric) derivativeValue() float64 { - return tm.proportionalValue() - tm.historyValue -} - -// Calculates the integral (history) component of the trust value -func (tm *Metric) calcHistoryValue() float64 { - var hv float64 - - for i := 0; i < tm.numIntervals; i++ { - hv += tm.fadedMemoryValue(i) * tm.historyWeights[i] - } - - return hv / tm.historyWeightSum -} - -// Retrieves the actual history data value that represents the requested time interval -func (tm *Metric) fadedMemoryValue(interval int) float64 { - first := tm.historySize - 1 - - if interval == 0 { - // Base case - return tm.history[first] - } - - offset := intervalToHistoryOffset(interval) - return tm.history[first-offset] -} - -// Performs the update for our Faded Memories process, which allows the -// trust metric tracking window to be large while maintaining a small -// number of history data values -func (tm *Metric) updateFadedMemory() { - if tm.historySize < 2 { - return - } - - end := tm.historySize - 1 - // Keep the most recent history element - for count := 1; count < tm.historySize; count++ { - i := end - count - // The older the data is, the more we 
spread it out - x := math.Pow(2, float64(count)) - // Two history data values are merged into a single value - tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x - } -} - -// Map the interval value down to an offset from the beginning of history -func intervalToHistoryOffset(interval int) int { - // The system maintains 2^m interval values in the form of m history - // data values. Therefore, we access the ith interval by obtaining - // the history data index = the floor of log2(i) - return int(math.Floor(math.Log2(float64(interval)))) -} diff --git a/internal/p2p/trust/metric_test.go b/internal/p2p/trust/metric_test.go deleted file mode 100644 index 65caf38a23..0000000000 --- a/internal/p2p/trust/metric_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package trust - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTrustMetricScores(t *testing.T) { - tm := NewMetric() - err := tm.Start() - require.NoError(t, err) - - // Perfect score - tm.GoodEvents(1) - score := tm.TrustScore() - assert.Equal(t, 100, score) - - // Less than perfect score - tm.BadEvents(10) - score = tm.TrustScore() - assert.NotEqual(t, 100, score) - err = tm.Stop() - require.NoError(t, err) -} - -func TestTrustMetricConfig(t *testing.T) { - // 7 days - window := time.Minute * 60 * 24 * 7 - config := MetricConfig{ - TrackingWindow: window, - IntervalLength: 2 * time.Minute, - } - - tm := NewMetricWithConfig(config) - err := tm.Start() - require.NoError(t, err) - - // The max time intervals should be the TrackingWindow / IntervalLen - assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) - - dc := DefaultConfig() - // These weights should still be the default values - assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, dc.IntegralWeight, tm.integralWeight) - err = tm.Stop() - require.NoError(t, err) - tm.Wait() - - config.ProportionalWeight = 0.3 - 
config.IntegralWeight = 0.7 - tm = NewMetricWithConfig(config) - err = tm.Start() - require.NoError(t, err) - - // These weights should be equal to our custom values - assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, config.IntegralWeight, tm.integralWeight) - err = tm.Stop() - require.NoError(t, err) - tm.Wait() -} - -func TestTrustMetricCopyNilPointer(t *testing.T) { - var tm *Metric - - ctm := tm.Copy() - - assert.Nil(t, ctm) -} - -// XXX: This test fails non-deterministically -//nolint:unused,deadcode -func _TestTrustMetricStopPause(t *testing.T) { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt := NewTestTicker() - tm := NewMetric() - tm.SetTicker(tt) - err := tm.Start() - require.NoError(t, err) - // Allow some time intervals to pass and pause - tt.NextTick() - tt.NextTick() - tm.Pause() - - // could be 1 or 2 because Pause and NextTick race - first := tm.Copy().numIntervals - - // Allow more time to pass and check the intervals are unchanged - tt.NextTick() - tt.NextTick() - assert.Equal(t, first, tm.Copy().numIntervals) - - // Get the trust metric activated again - tm.GoodEvents(5) - // Allow some time intervals to pass and stop - tt.NextTick() - tt.NextTick() - err = tm.Stop() - require.NoError(t, err) - tm.Wait() - - second := tm.Copy().numIntervals - // Allow more intervals to pass while the metric is stopped - // and check that the number of intervals match - tm.NextTimeInterval() - tm.NextTimeInterval() - // XXX: fails non-deterministically: - // expected 5, got 6 - assert.Equal(t, second+2, tm.Copy().numIntervals) - - if first > second { - t.Fatalf("numIntervals should always increase or stay the same over time") - } -} diff --git a/internal/p2p/trust/store.go b/internal/p2p/trust/store.go deleted file mode 100644 index 9f200b9dd5..0000000000 --- a/internal/p2p/trust/store.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. 
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "encoding/json" - "fmt" - "time" - - dbm "github.com/tendermint/tm-db" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" -) - -const defaultStorePeriodicSaveInterval = 1 * time.Minute - -var trustMetricKey = []byte("trustMetricStore") - -// MetricStore - Manages all trust metrics for peers -type MetricStore struct { - service.BaseService - - // Maps a Peer.Key to that peer's TrustMetric - peerMetrics map[string]*Metric - - // Mutex that protects the map and history data file - mtx tmsync.Mutex - - // The db where peer trust metric history data will be stored - db dbm.DB - - // This configuration will be used when creating new TrustMetrics - config MetricConfig -} - -// NewTrustMetricStore returns a store that saves data to the DB -// and uses the config when creating new trust metrics. -// Use Start to to initialize the trust metric store -func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore { - tms := &MetricStore{ - peerMetrics: make(map[string]*Metric), - db: db, - config: tmc, - } - - tms.BaseService = *service.NewBaseService(nil, "MetricStore", tms) - return tms -} - -// OnStart implements Service -func (tms *MetricStore) OnStart() error { - if err := tms.BaseService.OnStart(); err != nil { - return err - } - - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tms.loadFromDB() - go tms.saveRoutine() - return nil -} - -// OnStop implements Service -func (tms *MetricStore) OnStop() { - tms.BaseService.OnStop() - - tms.mtx.Lock() - defer tms.mtx.Unlock() - - // Stop all trust metric go-routines - for _, tm := range tms.peerMetrics { - if err := tm.Stop(); err != nil { - tms.Logger.Error("unable to stop metric store", "error", err) - } - } - - // Make the final trust history data save - tms.saveToDB() -} - -// Size returns the number of entries in the trust metric 
store -func (tms *MetricStore) Size() int { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - return tms.size() -} - -// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key. -// The caller is expected to call Start on the TrustMetric being added -func (tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - if key == "" || tm == nil { - return - } - tms.peerMetrics[key] = tm -} - -// GetPeerTrustMetric returns a trust metric by peer key -func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tm, ok := tms.peerMetrics[key] - if !ok { - // If the metric is not available, we will create it - tm = NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { - tms.Logger.Error("unable to start metric store", "error", err) - } - // The metric needs to be in the map - tms.peerMetrics[key] = tm - } - return tm -} - -// PeerDisconnected pauses the trust metric associated with the peer identified by the key -func (tms *MetricStore) PeerDisconnected(key string) { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - // If the Peer that disconnected has a metric, pause it - if tm, ok := tms.peerMetrics[key]; ok { - tm.Pause() - } -} - -// Saves the history data for all peers to the store DB. 
-// This public method acquires the trust metric store lock -func (tms *MetricStore) SaveToDB() { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tms.saveToDB() -} - -/* Private methods */ - -// size returns the number of entries in the store without acquiring the mutex -func (tms *MetricStore) size() int { - return len(tms.peerMetrics) -} - -/* Loading & Saving */ -/* Both loadFromDB and savetoDB assume the mutex has been acquired */ - -// Loads the history data for all peers from the store DB -// cmn.Panics if file is corrupt -func (tms *MetricStore) loadFromDB() bool { - // Obtain the history data we have so far - bytes, err := tms.db.Get(trustMetricKey) - if err != nil { - panic(err) - } - if bytes == nil { - return false - } - - peers := make(map[string]MetricHistoryJSON) - err = json.Unmarshal(bytes, &peers) - if err != nil { - panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) - } - - // If history data exists in the file, - // load it into trust metric - for key, p := range peers { - tm := NewMetricWithConfig(tms.config) - - if err := tm.Start(); err != nil { - tms.Logger.Error("unable to start metric", "error", err) - } - tm.Init(p) - // Load the peer trust metric into the store - tms.peerMetrics[key] = tm - } - return true -} - -// Saves the history data for all peers to the store DB -func (tms *MetricStore) saveToDB() { - tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size()) - - peers := make(map[string]MetricHistoryJSON) - - for key, tm := range tms.peerMetrics { - // Add an entry for the peer identified by key - peers[key] = tm.HistoryJSON() - } - - // Write all the data back to the DB - bytes, err := json.Marshal(peers) - if err != nil { - tms.Logger.Error("Failed to encode the TrustHistory", "err", err) - return - } - if err := tms.db.SetSync(trustMetricKey, bytes); err != nil { - tms.Logger.Error("failed to flush data to disk", "error", err) - } -} - -// Periodically saves the trust history data to the DB -func 
(tms *MetricStore) saveRoutine() { - t := time.NewTicker(defaultStorePeriodicSaveInterval) - defer t.Stop() -loop: - for { - select { - case <-t.C: - tms.SaveToDB() - case <-tms.Quit(): - break loop - } - } -} diff --git a/internal/p2p/trust/store_test.go b/internal/p2p/trust/store_test.go deleted file mode 100644 index ecf17dc4ab..0000000000 --- a/internal/p2p/trust/store_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/libs/log" -) - -func TestTrustMetricStoreSaveLoad(t *testing.T) { - dir := t.TempDir() - - historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) - require.NoError(t, err) - - // 0 peers saved - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - store.saveToDB() - // Load the data from the file - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - // Make sure we still have 0 entries - assert.Zero(t, store.Size()) - - // 100 TestTickers - var tt []*TestTicker - for i := 0; i < 100; i++ { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt = append(tt, NewTestTicker()) - } - // 100 peers - for i := 0; i < 100; i++ { - key := fmt.Sprintf("peer_%d", i) - tm := NewMetric() - - tm.SetTicker(tt[i]) - err = tm.Start() - require.NoError(t, err) - store.AddPeerTrustMetric(key, tm) - - tm.BadEvents(10) - tm.GoodEvents(1) - } - // Check that we have 100 entries and save - assert.Equal(t, 100, store.Size()) - // Give the 100 metrics time to process the history data - for i := 0; i < 100; i++ { - tt[i].NextTick() - 
tt[i].NextTick() - } - // Stop all the trust metrics and save - err = store.Stop() - require.NoError(t, err) - - // Load the data from the DB - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Check that we still have 100 peers with imperfect trust values - assert.Equal(t, 100, store.Size()) - for _, tm := range store.peerMetrics { - assert.NotEqual(t, 1.0, tm.TrustValue()) - } - - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStoreConfig(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - config := MetricConfig{ - ProportionalWeight: 0.5, - IntegralWeight: 0.5, - } - - // Create a store with custom config - store := NewTrustMetricStore(historyDB, config) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Have the store make us a metric with the config - tm := store.GetPeerTrustMetric("TestKey") - - // Check that the options made it to the metric - assert.Equal(t, 0.5, tm.proportionalWeight) - assert.Equal(t, 0.5, tm.integralWeight) - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStoreLookup(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Create 100 peers in the trust metric store - for i := 0; i < 100; i++ { - key := fmt.Sprintf("peer_%d", i) - store.GetPeerTrustMetric(key) - - // Check that the trust metric was successfully entered - ktm := store.peerMetrics[key] - assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key) - } - - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStorePeerScore(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - store := 
NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - key := "TestKey" - tm := store.GetPeerTrustMetric(key) - - // This peer is innocent so far - first := tm.TrustScore() - assert.Equal(t, 100, first) - - // Add some undesirable events and disconnect - tm.BadEvents(1) - first = tm.TrustScore() - assert.NotEqual(t, 100, first) - tm.BadEvents(10) - second := tm.TrustScore() - - if second > first { - t.Errorf("a greater number of bad events should lower the trust score") - } - store.PeerDisconnected(key) - - // We will remember our experiences with this peer - tm = store.GetPeerTrustMetric(key) - assert.NotEqual(t, 100, tm.TrustScore()) - err = store.Stop() - require.NoError(t, err) -} diff --git a/internal/p2p/trust/ticker.go b/internal/p2p/trust/ticker.go deleted file mode 100644 index 3f0f309192..0000000000 --- a/internal/p2p/trust/ticker.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
- -package trust - -import ( - "time" -) - -// MetricTicker provides a single ticker interface for the trust metric -type MetricTicker interface { - // GetChannel returns the receive only channel that fires at each time interval - GetChannel() <-chan time.Time - - // Stop will halt further activity on the ticker channel - Stop() -} - -// The ticker used during testing that provides manual control over time intervals -type TestTicker struct { - C chan time.Time - stopped bool -} - -// NewTestTicker returns our ticker used within test routines -func NewTestTicker() *TestTicker { - c := make(chan time.Time) - return &TestTicker{ - C: c, - } -} - -func (t *TestTicker) GetChannel() <-chan time.Time { - return t.C -} - -func (t *TestTicker) Stop() { - t.stopped = true -} - -// NextInterval manually sends Time on the ticker channel -func (t *TestTicker) NextTick() { - if t.stopped { - return - } - t.C <- time.Now() -} - -// Ticker is just a wrap around time.Ticker that allows it -// to meet the requirements of our interface -type Ticker struct { - *time.Ticker -} - -// NewTicker returns a normal time.Ticker wrapped to meet our interface -func NewTicker(d time.Duration) *Ticker { - return &Ticker{time.NewTicker(d)} -} - -func (t *Ticker) GetChannel() <-chan time.Time { - return t.C -} diff --git a/internal/p2p/upnp/probe.go b/internal/p2p/upnp/probe.go deleted file mode 100644 index ae641abbbe..0000000000 --- a/internal/p2p/upnp/probe.go +++ /dev/null @@ -1,111 +0,0 @@ -package upnp - -import ( - "fmt" - "net" - "time" - - "github.com/tendermint/tendermint/libs/log" -) - -type Capabilities struct { - PortMapping bool - Hairpin bool -} - -func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) { - nat, err := Discover() - if err != nil { - return nil, nil, nil, fmt.Errorf("nat upnp could not be discovered: %v", err) - } - logger.Info(fmt.Sprintf("ourIP: %v", nat.(*upnpNAT).ourIP)) - - ext, err := nat.GetExternalAddress() - if 
err != nil { - return nat, nil, nil, fmt.Errorf("external address error: %v", err) - } - logger.Info(fmt.Sprintf("External address: %v", ext)) - - port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) - if err != nil { - return nat, nil, ext, fmt.Errorf("port mapping error: %v", err) - } - logger.Info(fmt.Sprintf("Port mapping mapped: %v", port)) - - // also run the listener, open for all remote addresses. - listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) - if err != nil { - return nat, nil, ext, fmt.Errorf("error establishing listener: %v", err) - } - return nat, listener, ext, nil -} - -func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) { - // Listener - go func() { - inConn, err := listener.Accept() - if err != nil { - logger.Info(fmt.Sprintf("Listener.Accept() error: %v", err)) - return - } - logger.Info(fmt.Sprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) - buf := make([]byte, 1024) - n, err := inConn.Read(buf) - if err != nil { - logger.Info(fmt.Sprintf("Incoming connection read error: %v", err)) - return - } - logger.Info(fmt.Sprintf("Incoming connection read %v bytes: %X", n, buf)) - if string(buf) == "test data" { - supportsHairpin = true - return - } - }() - - // Establish outgoing - outConn, err := net.Dial("tcp", extAddr) - if err != nil { - logger.Info(fmt.Sprintf("Outgoing connection dial error: %v", err)) - return - } - - n, err := outConn.Write([]byte("test data")) - if err != nil { - logger.Info(fmt.Sprintf("Outgoing connection write error: %v", err)) - return - } - logger.Info(fmt.Sprintf("Outgoing connection wrote %v bytes", n)) - - // Wait for data receipt - time.Sleep(1 * time.Second) - return supportsHairpin -} - -func Probe(logger log.Logger) (caps Capabilities, err error) { - logger.Info("Probing for UPnP!") - - intPort, extPort := 8001, 8001 - - nat, listener, ext, err := makeUPNPListener(intPort, extPort, 
logger) - if err != nil { - return - } - caps.PortMapping = true - - // Deferred cleanup - defer func() { - if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { - logger.Error(fmt.Sprintf("Port mapping delete error: %v", err)) - } - if err := listener.Close(); err != nil { - logger.Error(fmt.Sprintf("Listener closing error: %v", err)) - } - }() - - supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger) - if supportsHairpin { - caps.Hairpin = true - } - - return -} diff --git a/internal/p2p/upnp/upnp.go b/internal/p2p/upnp/upnp.go deleted file mode 100644 index c00530acae..0000000000 --- a/internal/p2p/upnp/upnp.go +++ /dev/null @@ -1,404 +0,0 @@ -// Taken from taipei-torrent. -// Just enough UPnP to be able to forward ports -// For more information, see: http://www.upnp-hacks.org/upnp.html -package upnp - -// TODO: use syscalls to get actual ourIP, see issue #712 - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -type upnpNAT struct { - serviceURL string - ourIP string - urnDomain string -} - -// protocol is either "udp" or "tcp" -type NAT interface { - GetExternalAddress() (addr net.IP, err error) - AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() - - if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { - return nil, err - } - - st := "InternetGatewayDevice:1" - - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - "ST: 
ssdp:all\r\n" + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - _, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - return - } - for { - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - break - } - answer := string(answerBytes[0:n]) - if !strings.Contains(answer, st) { - continue - } - // HTTP header field names are case-insensitive. - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 - locString := "\r\nlocation:" - answer = strings.ToLower(answer) - locIndex := strings.Index(answer, locString) - if locIndex < 0 { - continue - } - loc := answer[locIndex+len(locString):] - endIndex := strings.Index(loc, "\r\n") - if endIndex < 0 { - continue - } - locURL := strings.TrimSpace(loc[0:endIndex]) - var serviceURL, urnDomain string - serviceURL, urnDomain, err = getServiceURL(locURL) - if err != nil { - return - } - var ourIP net.IP - ourIP, err = localIPv4() - if err != nil { - return - } - nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain} - return - } - } - err = errors.New("upnp port discovery failed") - return nat, err -} - -type Envelope struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` - Soap *SoapBody -} -type SoapBody struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"` - ExternalIP *ExternalIPAddressResponse -} - -type ExternalIPAddressResponse struct { - XMLName xml.Name `xml:"GetExternalIPAddressResponse"` - IPAddress string `xml:"NewExternalIPAddress"` -} - -type ExternalIPAddress struct { - XMLName xml.Name `xml:"NewExternalIPAddress"` - IP string -} - -type Service struct { - ServiceType string `xml:"serviceType"` - ControlURL string `xml:"controlURL"` -} - -type DeviceList struct { - Device []Device `xml:"device"` -} - -type ServiceList 
struct { - Service []Service `xml:"service"` -} - -type Device struct { - XMLName xml.Name `xml:"device"` - DeviceType string `xml:"deviceType"` - DeviceList DeviceList `xml:"deviceList"` - ServiceList ServiceList `xml:"serviceList"` -} - -type Root struct { - Device Device -} - -func getChildDevice(d *Device, deviceType string) *Device { - dl := d.DeviceList.Device - for i := 0; i < len(dl); i++ { - if strings.Contains(dl[i].DeviceType, deviceType) { - return &dl[i] - } - } - return nil -} - -func getChildService(d *Device, serviceType string) *Service { - sl := d.ServiceList.Service - for i := 0; i < len(sl); i++ { - if strings.Contains(sl[i].ServiceType, serviceType) { - return &sl[i] - } - } - return nil -} - -func localIPv4() (net.IP, error) { - tt, err := net.Interfaces() - if err != nil { - return nil, err - } - for _, t := range tt { - aa, err := t.Addrs() - if err != nil { - return nil, err - } - for _, a := range aa { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || v4[0] == 127 { // loopback address - continue - } - return v4, nil - } - } - return nil, errors.New("cannot find local IP address") -} - -func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) // nolint: gosec - if err != nil { - return - } - defer r.Body.Close() - - if r.StatusCode >= 400 { - err = errors.New(string(rune(r.StatusCode))) - return - } - var root Root - err = xml.NewDecoder(r.Body).Decode(&root) - if err != nil { - return - } - a := &root.Device - if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") { - err = errors.New("no InternetGatewayDevice") - return - } - b := getChildDevice(a, "WANDevice:1") - if b == nil { - err = errors.New("no WANDevice") - return - } - c := getChildDevice(b, "WANConnectionDevice:1") - if c == nil { - err = errors.New("no WANConnectionDevice") - return - } - d := getChildService(c, "WANIPConnection:1") - if d == nil { - // Some routers don't follow 
the UPnP spec, and put WanIPConnection under WanDevice, - // instead of under WanConnectionDevice - d = getChildService(b, "WANIPConnection:1") - - if d == nil { - err = errors.New("no WANIPConnection") - return - } - } - // Extract the domain name, which isn't always 'schemas-upnp-org' - urnDomain = strings.Split(d.ServiceType, ":")[1] - url = combineURL(rootURL, d.ControlURL) - return url, urnDomain, err -} - -func combineURL(rootURL, subURL string) string { - protocolEnd := "://" - protoEndIndex := strings.Index(rootURL, protocolEnd) - a := rootURL[protoEndIndex+len(protocolEnd):] - rootIndex := strings.Index(a, "/") - return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL -} - -func soapRequest(url, function, message, domain string) (r *http.Response, err error) { - fullMessage := "" + - "\r\n" + - "" + message + "" - - req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") - req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") - // req.Header.Set("Transfer-Encoding", "chunked") - req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"") - req.Header.Set("Connection", "Close") - req.Header.Set("Cache-Control", "no-cache") - req.Header.Set("Pragma", "no-cache") - - // log.Stderr("soapRequest ", req) - - r, err = http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - /*if r.Body != nil { - defer r.Body.Close() - }*/ - - if r.StatusCode >= 400 { - // log.Stderr(function, r.StatusCode) - err = errors.New("error " + strconv.Itoa(r.StatusCode) + " for " + function) - r = nil - return - } - return r, err -} - -type statusInfo struct { - externalIPAddress string -} - -func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { - - message := "\r\n" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", 
message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - var envelope Envelope - data, err := ioutil.ReadAll(response.Body) - if err != nil { - return - } - reader := bytes.NewReader(data) - err = xml.NewDecoder(reader).Decode(&envelope) - if err != nil { - return - } - - info = statusInfo{envelope.Soap.ExternalIP.IPAddress} - - if err != nil { - return - } - - return info, err -} - -// GetExternalAddress returns an external IP. If GetExternalIPAddress action -// fails or IP returned is invalid, GetExternalAddress returns an error. -func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) { - info, err := n.getExternalIPAddress() - if err != nil { - return - } - addr = net.ParseIP(info.externalIPAddress) - if addr == nil { - err = fmt.Errorf("failed to parse IP: %v", info.externalIPAddress) - } - return -} - -func (n *upnpNAT) AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) { - // A single concatenation would break ARM compilation. 
- message := "\r\n" + - "" + strconv.Itoa(externalPort) - message += "" + protocol + "" - message += "" + strconv.Itoa(internalPort) + "" + - "" + n.ourIP + "" + - "1" - message += description + - "" + strconv.Itoa(timeout) + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was forwarded - // log.Println(message, response) - // JAE: - // body, err := ioutil.ReadAll(response.Body) - // fmt.Println(string(body), err) - mappedExternalPort = externalPort - _ = response - return mappedExternalPort, err -} - -func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - - message := "\r\n" + - "" + strconv.Itoa(externalPort) + - "" + protocol + "" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was deleted - // log.Println(message, response) - _ = response - return -} diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go index aa5e8f0391..79d462f2c7 100644 --- a/internal/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -18,14 +18,15 @@ type AppConnConsensus interface { SetResponseCallback(abciclient.Callback) Error() error - InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) + InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) - BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abciclient.ReqRes, error) - EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) - CommitSync(context.Context) 
(*types.ResponseCommit, error) - - PreprocessTxsSync(context.Context, types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) + PrepareProposal(context.Context, types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) + ExtendVote(context.Context, types.RequestExtendVote) (*types.ResponseExtendVote, error) + VerifyVoteExtension(context.Context, types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) + BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) + DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) + EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) + Commit(context.Context) (*types.ResponseCommit, error) } type AppConnMempool interface { @@ -33,27 +34,27 @@ type AppConnMempool interface { Error() error CheckTxAsync(context.Context, types.RequestCheckTx) (*abciclient.ReqRes, error) - CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) + CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) FlushAsync(context.Context) (*abciclient.ReqRes, error) - FlushSync(context.Context) error + Flush(context.Context) error } type AppConnQuery interface { Error() error - EchoSync(context.Context, string) (*types.ResponseEcho, error) - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) + Echo(context.Context, string) (*types.ResponseEcho, error) + Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error) + Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error) } type AppConnSnapshot interface { Error() error - ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - 
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) } //----------------------------------------------------------------------------------------- @@ -64,6 +65,8 @@ type appConnConsensus struct { appConn abciclient.Client } +var _ AppConnConsensus = (*appConnConsensus)(nil) + func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus { return &appConnConsensus{ metrics: metrics, @@ -79,48 +82,65 @@ func (app *appConnConsensus) Error() error { return app.appConn.Error() } -func (app *appConnConsensus) InitChainSync( +func (app *appConnConsensus) InitChain( ctx context.Context, req types.RequestInitChain, ) (*types.ResponseInitChain, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() - return app.appConn.InitChainSync(ctx, req) + return app.appConn.InitChain(ctx, req) +} + +func (app *appConnConsensus) PrepareProposal( + ctx context.Context, + req types.RequestPrepareProposal, +) (*types.ResponsePrepareProposal, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() + return app.appConn.PrepareProposal(ctx, req) +} + +func (app *appConnConsensus) ExtendVote( + ctx context.Context, + req types.RequestExtendVote, +) (*types.ResponseExtendVote, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() + 
return app.appConn.ExtendVote(ctx, req) } -func (app *appConnConsensus) BeginBlockSync( +func (app *appConnConsensus) VerifyVoteExtension( + ctx context.Context, + req types.RequestVerifyVoteExtension, +) (*types.ResponseVerifyVoteExtension, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() + return app.appConn.VerifyVoteExtension(ctx, req) +} + +func (app *appConnConsensus) BeginBlock( ctx context.Context, req types.RequestBeginBlock, ) (*types.ResponseBeginBlock, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "begin_block", "type", "sync"))() - return app.appConn.BeginBlockSync(ctx, req) + return app.appConn.BeginBlock(ctx, req) } -func (app *appConnConsensus) DeliverTxAsync( +func (app *appConnConsensus) DeliverTx( ctx context.Context, req types.RequestDeliverTx, -) (*abciclient.ReqRes, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "async"))() - return app.appConn.DeliverTxAsync(ctx, req) +) (*types.ResponseDeliverTx, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "sync"))() + return app.appConn.DeliverTx(ctx, req) } -func (app *appConnConsensus) EndBlockSync( +func (app *appConnConsensus) EndBlock( ctx context.Context, req types.RequestEndBlock, ) (*types.ResponseEndBlock, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "sync"))() - return app.appConn.EndBlockSync(ctx, req) + return app.appConn.EndBlock(ctx, req) } -func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { +func (app *appConnConsensus) Commit(ctx context.Context) (*types.ResponseCommit, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() - return app.appConn.CommitSync(ctx) -} - -func (app *appConnConsensus) PreprocessTxsSync( - ctx context.Context, - req types.RequestPreprocessTxs, -) 
(*types.ResponsePreprocessTxs, error) { - return app.appConn.PreprocessTxsSync(ctx, req) + return app.appConn.Commit(ctx) } //------------------------------------------------ @@ -151,9 +171,9 @@ func (app *appConnMempool) FlushAsync(ctx context.Context) (*abciclient.ReqRes, return app.appConn.FlushAsync(ctx) } -func (app *appConnMempool) FlushSync(ctx context.Context) error { +func (app *appConnMempool) Flush(ctx context.Context) error { defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() - return app.appConn.FlushSync(ctx) + return app.appConn.Flush(ctx) } func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abciclient.ReqRes, error) { @@ -161,9 +181,9 @@ func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCh return app.appConn.CheckTxAsync(ctx, req) } -func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (app *appConnMempool) CheckTx(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() - return app.appConn.CheckTxSync(ctx, req) + return app.appConn.CheckTx(ctx, req) } //------------------------------------------------ @@ -185,19 +205,19 @@ func (app *appConnQuery) Error() error { return app.appConn.Error() } -func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { +func (app *appConnQuery) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() - return app.appConn.EchoSync(ctx, msg) + return app.appConn.Echo(ctx, msg) } -func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { +func (app *appConnQuery) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { defer 
addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() - return app.appConn.InfoSync(ctx, req) + return app.appConn.Info(ctx, req) } -func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { +func (app *appConnQuery) Query(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() - return app.appConn.QuerySync(ctx, reqQuery) + return app.appConn.Query(ctx, reqQuery) } //------------------------------------------------ @@ -219,34 +239,34 @@ func (app *appConnSnapshot) Error() error { return app.appConn.Error() } -func (app *appConnSnapshot) ListSnapshotsSync( +func (app *appConnSnapshot) ListSnapshots( ctx context.Context, req types.RequestListSnapshots, ) (*types.ResponseListSnapshots, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() - return app.appConn.ListSnapshotsSync(ctx, req) + return app.appConn.ListSnapshots(ctx, req) } -func (app *appConnSnapshot) OfferSnapshotSync( +func (app *appConnSnapshot) OfferSnapshot( ctx context.Context, req types.RequestOfferSnapshot, ) (*types.ResponseOfferSnapshot, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() - return app.appConn.OfferSnapshotSync(ctx, req) + return app.appConn.OfferSnapshot(ctx, req) } -func (app *appConnSnapshot) LoadSnapshotChunkSync( +func (app *appConnSnapshot) LoadSnapshotChunk( ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() - return app.appConn.LoadSnapshotChunkSync(ctx, req) + return app.appConn.LoadSnapshotChunk(ctx, req) } -func (app *appConnSnapshot) ApplySnapshotChunkSync( +func (app *appConnSnapshot) ApplySnapshotChunk( ctx 
context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() - return app.appConn.ApplySnapshotChunkSync(ctx, req) + return app.appConn.ApplySnapshotChunk(ctx, req) } // addTimeSample returns a function that, when called, adds an observation to m. diff --git a/internal/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go index f1ae7fe1a8..de8eac35d0 100644 --- a/internal/proxy/app_conn_test.go +++ b/internal/proxy/app_conn_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" @@ -17,9 +18,9 @@ import ( //---------------------------------------- type appConnTestI interface { - EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) - FlushSync(context.Context) error - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) + Echo(context.Context, string) (*types.ResponseEcho, error) + Flush(context.Context) error + Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error) } type appConnTest struct { @@ -30,16 +31,16 @@ func newAppConnTest(appConn abciclient.Client) appConnTestI { return &appConnTest{appConn} } -func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { - return app.appConn.EchoAsync(ctx, msg) +func (app *appConnTest) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + return app.appConn.Echo(ctx, msg) } -func (app *appConnTest) FlushSync(ctx context.Context) error { - return app.appConn.FlushSync(ctx) +func (app *appConnTest) Flush(ctx context.Context) error { + return app.appConn.Flush(ctx) } -func (app *appConnTest) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - return 
app.appConn.InfoSync(ctx, req) +func (app *appConnTest) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { + return app.appConn.Info(ctx, req) } //---------------------------------------- @@ -48,47 +49,39 @@ var SOCKET = "socket" func TestEcho(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + require.NoError(t, s.Start(ctx), "error starting socket server") + t.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator() - if err != nil { - t.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } + cli, err := clientCreator(logger.With("module", "abci-client")) + require.NoError(t, err, "Error creating ABCI client:") + + require.NoError(t, cli.Start(ctx), "Error starting ABCI client") proxy := newAppConnTest(cli) t.Log("Connected") - ctx := context.Background() for i := 0; i < 1000; i++ { - _, err = proxy.EchoAsync(ctx, fmt.Sprintf("echo-%v", i)) + _, err = proxy.Echo(ctx, fmt.Sprintf("echo-%v", i)) if err != nil { t.Error(err) } // flush sometimes if i%128 == 0 { - if err := proxy.FlushSync(ctx); err != nil { + if err := 
proxy.Flush(ctx); err != nil { t.Error(err) } } } - if err := proxy.FlushSync(ctx); err != nil { + if err := proxy.Flush(ctx); err != nil { t.Error(err) } } @@ -96,90 +89,74 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - b.Fatalf("Error starting socket server: %v", err.Error()) - } - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + require.NoError(b, s.Start(ctx), "Error starting socket server") + b.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator() - if err != nil { - b.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - b.Fatalf("Error starting ABCI client: %v", err.Error()) - } + cli, err := clientCreator(logger.With("module", "abci-client")) + require.NoError(b, err, "Error creating ABCI client") + + require.NoError(b, cli.Start(ctx), "Error starting ABCI client") proxy := newAppConnTest(cli) b.Log("Connected") echoString := strings.Repeat(" ", 200) b.StartTimer() // Start benchmarking tests - ctx := context.Background() for i := 0; i < b.N; i++ { - _, err = proxy.EchoAsync(ctx, echoString) + _, err = proxy.Echo(ctx, echoString) if err != nil { b.Error(err) } // flush sometimes if i%128 == 0 { - if err := proxy.FlushSync(ctx); err != 
nil { + if err := proxy.Flush(ctx); err != nil { b.Error(err) } } } - if err := proxy.FlushSync(ctx); err != nil { + if err := proxy.Flush(ctx); err != nil { b.Error(err) } b.StopTimer() - // info := proxy.InfoSync(types.RequestInfo{""}) + // info := proxy.Info(types.RequestInfo{""}) // b.Log("N: ", b.N, info) } func TestInfo(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) + logger := log.TestingLogger() + clientCreator := abciclient.NewRemoteCreator(logger, sockPath, SOCKET, true) // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + require.NoError(t, s.Start(ctx), "Error starting socket server") + t.Cleanup(func() { cancel(); s.Wait() }) // Start client - cli, err := clientCreator() - if err != nil { - t.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } + cli, err := clientCreator(logger.With("module", "abci-client")) + require.NoError(t, err, "Error creating ABCI client") + + require.NoError(t, cli.Start(ctx), "Error starting ABCI client") proxy := newAppConnTest(cli) t.Log("Connected") - resInfo, err := proxy.InfoSync(context.Background(), RequestInfo) - if err != nil { - t.Errorf("unexpected error: %v", err) - } + resInfo, err := proxy.Info(ctx, RequestInfo) + require.NoError(t, err) + if resInfo.Data != "{\"size\":0}" { 
t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") } diff --git a/internal/proxy/client.go b/internal/proxy/client.go index ddb9a928de..d01634bdf7 100644 --- a/internal/proxy/client.go +++ b/internal/proxy/client.go @@ -6,6 +6,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/app" ) @@ -15,12 +16,12 @@ import ( // // The Closer is a noop except for persistent_kvstore applications, // which will clean up the store. -func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io.Closer) { +func DefaultClientCreator(logger log.Logger, addr, transport, dbDir string) (abciclient.Creator, io.Closer) { switch addr { case "kvstore": return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} case "persistent_kvstore": - app := kvstore.NewPersistentKVStoreApplication(dbDir) + app := kvstore.NewPersistentKVStoreApplication(logger, dbDir) return abciclient.NewLocalCreator(app), app case "e2e": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) @@ -32,7 +33,7 @@ func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} default: mustConnect := false // loop retrying - return abciclient.NewRemoteCreator(addr, transport, mustConnect), noopCloser{} + return abciclient.NewRemoteCreator(logger, addr, transport, mustConnect), noopCloser{} } } diff --git a/internal/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go index 266bacf6a6..9c03cacae7 100644 --- a/internal/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -17,8 +17,8 @@ type AppConnConsensus struct { mock.Mock } -// BeginBlockSync provides a mock function with given fields: _a0, 
_a1 -func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { +// BeginBlock provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseBeginBlock @@ -40,8 +40,8 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.Reques return r0, r1 } -// CommitSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) { +// Commit provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, error) { ret := _m.Called(_a0) var r0 *types.ResponseCommit @@ -63,16 +63,16 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm return r0, r1 } -// DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { +// DeliverTx provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { ret := _m.Called(_a0, _a1) - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { + var r0 *types.ResponseDeliverTx + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) + r0 = ret.Get(0).(*types.ResponseDeliverTx) } } @@ -86,8 +86,8 @@ func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.Reques return r0, r1 } -// EndBlockSync provides a mock function with given fields: _a0, _a1 -func (_m 
*AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { +// EndBlock provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseEndBlock @@ -123,8 +123,31 @@ func (_m *AppConnConsensus) Error() error { return r0 } -// InitChainSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { +// ExtendVote provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseExtendVote + if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseExtendVote) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InitChain provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseInitChain @@ -146,21 +169,21 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request return r0, r1 } -// PreprocessTxsSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) PreprocessTxsSync(_a0 context.Context, _a1 types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { +// PrepareProposal provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) 
PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponsePreprocessTxs - if rf, ok := ret.Get(0).(func(context.Context, types.RequestPreprocessTxs) *types.ResponsePreprocessTxs); ok { + var r0 *types.ResponsePrepareProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponsePreprocessTxs) + r0 = ret.Get(0).(*types.ResponsePrepareProposal) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestPreprocessTxs) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -173,3 +196,26 @@ func (_m *AppConnConsensus) PreprocessTxsSync(_a0 context.Context, _a1 types.Req func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } + +// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseVerifyVoteExtension + if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/internal/proxy/mocks/app_conn_mempool.go b/internal/proxy/mocks/app_conn_mempool.go index 5429d8f909..9d8b80717f 100644 --- a/internal/proxy/mocks/app_conn_mempool.go +++ 
b/internal/proxy/mocks/app_conn_mempool.go @@ -17,16 +17,16 @@ type AppConnMempool struct { mock.Mock } -// CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m *AppConnMempool) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { + var r0 *types.ResponseCheckTx + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) + r0 = ret.Get(0).(*types.ResponseCheckTx) } } @@ -40,16 +40,16 @@ func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestChe return r0, r1 } -// CheckTxSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { +// CheckTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseCheckTx - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCheckTx) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -77,6 +77,20 @@ func (_m *AppConnMempool) Error() error { return r0 } +// Flush provides a mock function with given fields: _a0 +func (_m *AppConnMempool) Flush(_a0 context.Context) error { 
+ ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // FlushAsync provides a mock function with given fields: _a0 func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) @@ -100,20 +114,6 @@ func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, e return r0, r1 } -// FlushSync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // SetResponseCallback provides a mock function with given fields: _a0 func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) diff --git a/internal/proxy/mocks/app_conn_query.go b/internal/proxy/mocks/app_conn_query.go index 47ac5bef94..e515cb784e 100644 --- a/internal/proxy/mocks/app_conn_query.go +++ b/internal/proxy/mocks/app_conn_query.go @@ -15,8 +15,8 @@ type AppConnQuery struct { mock.Mock } -// EchoSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) EchoSync(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { +// Echo provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseEcho @@ -52,8 +52,8 @@ func (_m *AppConnQuery) Error() error { return r0 } -// InfoSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { +// Info provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { ret := 
_m.Called(_a0, _a1) var r0 *types.ResponseInfo @@ -75,8 +75,8 @@ func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*t return r0, r1 } -// QuerySync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { +// Query provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseQuery diff --git a/internal/proxy/mocks/app_conn_snapshot.go b/internal/proxy/mocks/app_conn_snapshot.go index 0b6f10ce13..0b3f06ad70 100644 --- a/internal/proxy/mocks/app_conn_snapshot.go +++ b/internal/proxy/mocks/app_conn_snapshot.go @@ -15,8 +15,8 @@ type AppConnSnapshot struct { mock.Mock } -// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { +// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseApplySnapshotChunk @@ -52,8 +52,8 @@ func (_m *AppConnSnapshot) Error() error { return r0 } -// ListSnapshotsSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +// ListSnapshots provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseListSnapshots @@ -75,8 +75,8 @@ func (_m *AppConnSnapshot) 
ListSnapshotsSync(_a0 context.Context, _a1 types.Requ return r0, r1 } -// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { +// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseLoadSnapshotChunk @@ -98,8 +98,8 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types. return r0, r1 } -// OfferSnapshotSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +// OfferSnapshot provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseOfferSnapshot diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go index 0bcc64af67..56b70b7b3a 100644 --- a/internal/proxy/multi_app_conn.go +++ b/internal/proxy/multi_app_conn.go @@ -1,12 +1,14 @@ package proxy import ( + "context" + "errors" "fmt" "os" "syscall" abciclient "github.com/tendermint/tendermint/abci/client" - tmlog "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -33,8 +35,8 @@ type AppConns interface { } // NewAppConns calls NewMultiAppConn. 
-func NewAppConns(clientCreator abciclient.Creator, metrics *Metrics) AppConns { - return NewMultiAppConn(clientCreator, metrics) +func NewAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { + return NewMultiAppConn(clientCreator, logger, metrics) } // multiAppConn implements AppConns. @@ -44,6 +46,7 @@ func NewAppConns(clientCreator abciclient.Creator, metrics *Metrics) AppConns { // TODO: on app restart, clients must reboot together type multiAppConn struct { service.BaseService + logger log.Logger metrics *Metrics consensusConn AppConnConsensus @@ -51,21 +54,30 @@ type multiAppConn struct { queryConn AppConnQuery snapshotConn AppConnSnapshot - consensusConnClient abciclient.Client - mempoolConnClient abciclient.Client - queryConnClient abciclient.Client - snapshotConnClient abciclient.Client + consensusConnClient stoppableClient + mempoolConnClient stoppableClient + queryConnClient stoppableClient + snapshotConnClient stoppableClient clientCreator abciclient.Creator } +// TODO: this is a totally internal and quasi permanent shim for +// clients. eventually we can have a single client and have some kind +// of reasonable lifecycle witout needing an explicit stop method. +type stoppableClient interface { + abciclient.Client + Stop() error +} + // NewMultiAppConn makes all necessary abci connections to the application. 
-func NewMultiAppConn(clientCreator abciclient.Creator, metrics *Metrics) AppConns { +func NewMultiAppConn(clientCreator abciclient.Creator, logger log.Logger, metrics *Metrics) AppConns { multiAppConn := &multiAppConn{ + logger: logger, metrics: metrics, clientCreator: clientCreator, } - multiAppConn.BaseService = *service.NewBaseService(nil, "multiAppConn", multiAppConn) + multiAppConn.BaseService = *service.NewBaseService(logger, "multiAppConn", multiAppConn) return multiAppConn } @@ -85,40 +97,40 @@ func (app *multiAppConn) Snapshot() AppConnSnapshot { return app.snapshotConn } -func (app *multiAppConn) OnStart() error { - c, err := app.abciClientFor(connQuery) +func (app *multiAppConn) OnStart(ctx context.Context) error { + c, err := app.abciClientFor(ctx, connQuery) if err != nil { return err } - app.queryConnClient = c + app.queryConnClient = c.(stoppableClient) app.queryConn = NewAppConnQuery(c, app.metrics) - c, err = app.abciClientFor(connSnapshot) + c, err = app.abciClientFor(ctx, connSnapshot) if err != nil { app.stopAllClients() return err } - app.snapshotConnClient = c + app.snapshotConnClient = c.(stoppableClient) app.snapshotConn = NewAppConnSnapshot(c, app.metrics) - c, err = app.abciClientFor(connMempool) + c, err = app.abciClientFor(ctx, connMempool) if err != nil { app.stopAllClients() return err } - app.mempoolConnClient = c + app.mempoolConnClient = c.(stoppableClient) app.mempoolConn = NewAppConnMempool(c, app.metrics) - c, err = app.abciClientFor(connConsensus) + c, err = app.abciClientFor(ctx, connConsensus) if err != nil { app.stopAllClients() return err } - app.consensusConnClient = c + app.consensusConnClient = c.(stoppableClient) app.consensusConn = NewAppConnConsensus(c, app.metrics) // Kill Tendermint if the ABCI application crashes. 
- go app.killTMOnClientError() + app.startWatchersForClientErrorToKillTendermint(ctx) return nil } @@ -127,8 +139,13 @@ func (app *multiAppConn) OnStop() { app.stopAllClients() } -func (app *multiAppConn) killTMOnClientError() { - killFn := func(conn string, err error, logger tmlog.Logger) { +func (app *multiAppConn) startWatchersForClientErrorToKillTendermint(ctx context.Context) { + // this function starts a number of threads (per abci client) + // that will SIGTERM's our own PID if any of the ABCI clients + // exit/return early. If the context is canceled then these + // functions will not kill tendermint. + + killFn := func(conn string, err error, logger log.Logger) { logger.Error( fmt.Sprintf("%s connection terminated. Did the application crash? Please restart tendermint", conn), "err", err) @@ -137,56 +154,80 @@ func (app *multiAppConn) killTMOnClientError() { } } - select { - case <-app.consensusConnClient.Quit(): - if err := app.consensusConnClient.Error(); err != nil { - killFn(connConsensus, err, app.Logger) - } - case <-app.mempoolConnClient.Quit(): - if err := app.mempoolConnClient.Error(); err != nil { - killFn(connMempool, err, app.Logger) - } - case <-app.queryConnClient.Quit(): - if err := app.queryConnClient.Error(); err != nil { - killFn(connQuery, err, app.Logger) - } - case <-app.snapshotConnClient.Quit(): - if err := app.snapshotConnClient.Error(); err != nil { - killFn(connSnapshot, err, app.Logger) - } + type op struct { + connClient stoppableClient + name string + } + + for _, client := range []op{ + { + connClient: app.consensusConnClient, + name: connConsensus, + }, + { + connClient: app.mempoolConnClient, + name: connMempool, + }, + { + connClient: app.queryConnClient, + name: connQuery, + }, + { + connClient: app.snapshotConnClient, + name: connSnapshot, + }, + } { + go func(name string, client stoppableClient) { + client.Wait() + if ctx.Err() != nil { + return + } + if err := client.Error(); err != nil { + killFn(name, err, app.logger) 
+ } + }(client.name, client.connClient) } } func (app *multiAppConn) stopAllClients() { if app.consensusConnClient != nil { if err := app.consensusConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping consensus client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.logger.Error("error while stopping consensus client", "error", err) + } } } if app.mempoolConnClient != nil { if err := app.mempoolConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping mempool client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.logger.Error("error while stopping mempool client", "error", err) + } } } if app.queryConnClient != nil { if err := app.queryConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping query client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.logger.Error("error while stopping query client", "error", err) + } } } if app.snapshotConnClient != nil { if err := app.snapshotConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping snapshot client", "error", err) + if !errors.Is(err, service.ErrAlreadyStopped) { + app.logger.Error("error while stopping snapshot client", "error", err) + } } } } -func (app *multiAppConn) abciClientFor(conn string) (abciclient.Client, error) { - c, err := app.clientCreator() +func (app *multiAppConn) abciClientFor(ctx context.Context, conn string) (abciclient.Client, error) { + c, err := app.clientCreator(app.logger.With( + "module", "abci-client", + "connection", conn)) if err != nil { return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) } - c.SetLogger(app.Logger.With("module", "abci-client", "connection", conn)) - if err := c.Start(); err != nil { + if err := c.Start(ctx); err != nil { return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err) } return c, nil diff --git a/internal/proxy/multi_app_conn_test.go 
b/internal/proxy/multi_app_conn_test.go index 25ed692aba..98ea0ca535 100644 --- a/internal/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "errors" "os" "os/signal" @@ -14,34 +15,44 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" abcimocks "github.com/tendermint/tendermint/abci/client/mocks" + "github.com/tendermint/tendermint/libs/log" ) +type noopStoppableClientImpl struct { + abciclient.Client + count int +} + +func (c *noopStoppableClientImpl) Stop() error { c.count++; return nil } + func TestAppConns_Start_Stop(t *testing.T) { - quitCh := make(<-chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return().Times(4) - clientMock.On("Start").Return(nil).Times(4) - clientMock.On("Stop").Return(nil).Times(4) - clientMock.On("Quit").Return(quitCh).Times(4) + clientMock.On("Start", mock.Anything).Return(nil).Times(4) + clientMock.On("Error").Return(nil) + clientMock.On("Wait").Return(nil).Times(4) + cl := &noopStoppableClientImpl{Client: clientMock} creatorCallCount := 0 - creator := func() (abciclient.Client, error) { + creator := func(logger log.Logger) (abciclient.Client, error) { creatorCallCount++ - return clientMock, nil + return cl, nil } - appConns := NewAppConns(creator, NopMetrics()) + appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) - err := appConns.Start() + err := appConns.Start(ctx) require.NoError(t, err) time.Sleep(100 * time.Millisecond) - err = appConns.Stop() - require.NoError(t, err) + cancel() + appConns.Wait() clientMock.AssertExpectations(t) + assert.Equal(t, 4, cl.count) assert.Equal(t, 4, creatorCallCount) } @@ -53,37 +64,30 @@ func TestAppConns_Failure(t *testing.T) { go func() { for range c { close(ok) + return } }() - quitCh := make(chan struct{}) - var recvQuitCh <-chan struct{} // nolint:gosimple - 
recvQuitCh = quitCh + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return() - clientMock.On("Start").Return(nil) - clientMock.On("Stop").Return(nil) + clientMock.On("Start", mock.Anything).Return(nil) - clientMock.On("Quit").Return(recvQuitCh) - clientMock.On("Error").Return(errors.New("EOF")).Once() + clientMock.On("Wait").Return(nil) + clientMock.On("Error").Return(errors.New("EOF")) + cl := &noopStoppableClientImpl{Client: clientMock} - creator := func() (abciclient.Client, error) { - return clientMock, nil + creator := func(log.Logger) (abciclient.Client, error) { + return cl, nil } - appConns := NewAppConns(creator, NopMetrics()) + appConns := NewAppConns(creator, log.TestingLogger(), NopMetrics()) - err := appConns.Start() + err := appConns.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := appConns.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate failure - close(quitCh) + t.Cleanup(func() { cancel(); appConns.Wait() }) select { case <-ok: diff --git a/internal/pubsub/example_test.go b/internal/pubsub/example_test.go new file mode 100644 index 0000000000..5eea61eb81 --- /dev/null +++ b/internal/pubsub/example_test.go @@ -0,0 +1,34 @@ +package pubsub_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" +) + +func TestExample(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t, log.TestingLogger()) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "example-client", + Query: query.MustCompile(`abci.account.name='John'`), + })) + + events := []abci.Event{ + { + Type: "abci.account", + 
Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}}, + }, + } + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Tombstone"), events)) + sub.mustReceive(ctx, pubstring("Tombstone")) +} diff --git a/internal/pubsub/pubsub.go b/internal/pubsub/pubsub.go new file mode 100644 index 0000000000..5f6a1ee3ba --- /dev/null +++ b/internal/pubsub/pubsub.go @@ -0,0 +1,443 @@ +// Package pubsub implements an event dispatching server with a single publisher +// and multiple subscriber clients. Multiple goroutines can safely publish to a +// single Server instance. +// +// Clients register subscriptions with a query to select which messages they +// wish to receive. When messages are published, they are broadcast to all +// clients whose subscription query matches that message. Queries are +// constructed using the github.com/tendermint/tendermint/internal/pubsub/query +// package. +// +// Example: +// +// q, err := query.New(`account.name='John'`) +// if err != nil { +// return err +// } +// sub, err := pubsub.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ +// ClientID: "johns-transactions", +// Query: q, +// }) +// if err != nil { +// return err +// } +// +// for { +// next, err := sub.Next(ctx) +// if err == pubsub.ErrTerminated { +// return err // terminated by publisher +// } else if err != nil { +// return err // timed out, client unsubscribed, etc. +// } +// process(next) +// } +// +package pubsub + +import ( + "context" + "errors" + "fmt" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +var ( + // ErrSubscriptionNotFound is returned when a client tries to unsubscribe + // from not existing subscription. 
+ ErrSubscriptionNotFound = errors.New("subscription not found") + + // ErrAlreadySubscribed is returned when a client tries to subscribe twice or + // more using the same query. + ErrAlreadySubscribed = errors.New("already subscribed") + + // ErrServerStopped is returned when attempting to publish or subscribe to a + // server that has been stopped. + ErrServerStopped = errors.New("pubsub server is stopped") +) + +// SubscribeArgs are the parameters to create a new subscription. +type SubscribeArgs struct { + ClientID string // Client ID + Query *query.Query // filter query for events (required) + Limit int // subscription queue capacity limit (0 means 1) + Quota int // subscription queue soft quota (0 uses Limit) +} + +// UnsubscribeArgs are the parameters to remove a subscription. +// The subscriber ID must be populated, and at least one of the client ID or +// the registered query. +type UnsubscribeArgs struct { + Subscriber string // subscriber ID chosen by the client (required) + ID string // subscription ID (assigned by the server) + Query *query.Query // the query registered with the subscription +} + +// Validate returns nil if args are valid to identify a subscription to remove. +// Otherwise, it reports an error. +func (args UnsubscribeArgs) Validate() error { + if args.Subscriber == "" { + return errors.New("must specify a subscriber") + } + return nil +} + +// Server allows clients to subscribe/unsubscribe for messages, publishing +// messages with or without events, and manages internal state. +type Server struct { + service.BaseService + logger log.Logger + + queue chan item + done <-chan struct{} // closed when server should exit + pubs sync.RWMutex // excl: shutdown; shared: active publisher + exited chan struct{} // server exited + + // All subscriptions currently known. + // Lock exclusive to add, remove, or cancel subscriptions. + // Lock shared to look up or publish to subscriptions. 
+ subs struct { + sync.RWMutex + index *subIndex + + // This function is called synchronously with each message published + // before it is delivered to any other subscriber. This allows an index + // to be persisted before any subscribers see the messages. + observe func(Message) error + } + + // TODO(creachadair): Rework the options so that this does not need to live + // as a field. It is not otherwise needed. + queueCap int +} + +// Option sets a parameter for the server. +type Option func(*Server) + +// NewServer returns a new server. See the commentary on the Option functions +// for a detailed description of how to configure buffering. If no options are +// provided, the resulting server's queue is unbuffered. +func NewServer(logger log.Logger, options ...Option) *Server { + s := &Server{logger: logger} + + s.BaseService = *service.NewBaseService(logger, "PubSub", s) + for _, opt := range options { + opt(s) + } + + // The queue receives items to be published. + s.queue = make(chan item, s.queueCap) + + // The index tracks subscriptions by ID and query terms. + s.subs.index = newSubIndex() + + return s +} + +// BufferCapacity allows you to specify capacity for publisher's queue. This +// is the number of messages that can be published without blocking. If no +// buffer is specified, publishing is synchronous with delivery. This function +// will panic if cap < 0. +func BufferCapacity(cap int) Option { + if cap < 0 { + panic("negative buffer capacity") + } + return func(s *Server) { s.queueCap = cap } +} + +// BufferCapacity returns capacity of the publication queue. +func (s *Server) BufferCapacity() int { return cap(s.queue) } + +// Subscribe creates a subscription for the given client ID and query. +// If len(capacities) > 0, its first value is used as the queue capacity. +// +// Deprecated: Use SubscribeWithArgs. This method will be removed in v0.36. 
+func (s *Server) Subscribe(ctx context.Context, clientID string, query *query.Query, capacities ...int) (*Subscription, error) { + args := SubscribeArgs{ + ClientID: clientID, + Query: query, + Limit: 1, + } + if len(capacities) > 0 { + args.Limit = capacities[0] + if len(capacities) > 1 { + args.Quota = capacities[1] + } + // bounds are checked below + } + return s.SubscribeWithArgs(ctx, args) +} + +// Observe registers an observer function that will be called synchronously +// with each published message matching any of the given queries, prior to it +// being forwarded to any subscriber. If no queries are specified, all +// messages will be observed. An error is reported if an observer is already +// registered. +func (s *Server) Observe(ctx context.Context, observe func(Message) error, queries ...*query.Query) error { + s.subs.Lock() + defer s.subs.Unlock() + if observe == nil { + return errors.New("observe callback is nil") + } else if s.subs.observe != nil { + return errors.New("an observer is already registered") + } + + // Compile the message filter. + var matches func(Message) bool + if len(queries) == 0 { + matches = func(Message) bool { return true } + } else { + matches = func(msg Message) bool { + for _, q := range queries { + if q.Matches(msg.events) { + return true + } + } + return false + } + } + + s.subs.observe = func(msg Message) error { + if matches(msg) { + return observe(msg) + } + return nil // nothing to do for this message + } + return nil +} + +// SubscribeWithArgs creates a subscription for the given arguments. It is an +// error if the query is nil, a subscription already exists for the specified +// client ID and query, or if the capacity arguments are invalid. 
+func (s *Server) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Subscription, error) { + s.subs.Lock() + defer s.subs.Unlock() + + if s.subs.index == nil { + return nil, ErrServerStopped + } else if s.subs.index.contains(args.ClientID, args.Query.String()) { + return nil, ErrAlreadySubscribed + } + + if args.Limit == 0 { + args.Limit = 1 + } + sub, err := newSubscription(args.Quota, args.Limit) + if err != nil { + return nil, err + } + s.subs.index.add(&subInfo{ + clientID: args.ClientID, + query: args.Query, + subID: sub.id, + sub: sub, + }) + return sub, nil +} + +// Unsubscribe removes the subscription for the given client and/or query. It +// returns ErrSubscriptionNotFound if no such subscription exists. +func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { + if err := args.Validate(); err != nil { + return err + } + s.subs.Lock() + defer s.subs.Unlock() + if s.subs.index == nil { + return ErrServerStopped + } + + // TODO(creachadair): Do we need to support unsubscription for an "empty" + // query? I believe that case is not possible by the Query grammar, but we + // should make sure. + // + // Revisit this logic once we are able to remove indexing by query. + + var evict subInfoSet + if args.Subscriber != "" { + evict = s.subs.index.findClientID(args.Subscriber) + if args.Query != nil { + evict = evict.withQuery(args.Query.String()) + } + } else { + evict = s.subs.index.findQuery(args.Query.String()) + } + + if len(evict) == 0 { + return ErrSubscriptionNotFound + } + s.removeSubs(evict, ErrUnsubscribed) + return nil +} + +// UnsubscribeAll removes all subscriptions for the given client ID. +// It returns ErrSubscriptionNotFound if no subscriptions exist for that client. 
+func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + s.subs.Lock() + defer s.subs.Unlock() + + evict := s.subs.index.findClientID(clientID) + if len(evict) == 0 { + return ErrSubscriptionNotFound + } + s.removeSubs(evict, ErrUnsubscribed) + return nil +} + +// NumClients returns the number of clients. +func (s *Server) NumClients() int { + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.byClient) +} + +// NumClientSubscriptions returns the number of subscriptions the client has. +func (s *Server) NumClientSubscriptions(clientID string) int { + s.subs.RLock() + defer s.subs.RUnlock() + return len(s.subs.index.findClientID(clientID)) +} + +// Publish publishes the given message. An error will be returned to the caller +// if the context is canceled. +func (s *Server) Publish(ctx context.Context, msg types.EventData) error { + return s.publish(ctx, msg, []abci.Event{}) +} + +// PublishWithEvents publishes the given message with the set of events. The set +// is matched with clients queries. If there is a match, the message is sent to +// the client. +func (s *Server) PublishWithEvents(ctx context.Context, msg types.EventData, events []abci.Event) error { + return s.publish(ctx, msg, events) +} + +// OnStop implements part of the Service interface. It is a no-op. +func (s *Server) OnStop() {} + +// Wait implements Service.Wait by blocking until the server has exited, then +// yielding to the base service wait. +func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() } + +// OnStart implements Service.OnStart by starting the server. +func (s *Server) OnStart(ctx context.Context) error { s.run(ctx); return nil } + +// OnReset implements Service.OnReset. It has no effect for this service. 
+func (s *Server) OnReset() error { return nil } + +func (s *Server) publish(ctx context.Context, data types.EventData, events []abci.Event) error { + s.pubs.RLock() + defer s.pubs.RUnlock() + + select { + case <-s.done: + return ErrServerStopped + case <-ctx.Done(): + return ctx.Err() + case s.queue <- item{ + Data: data, + Events: events, + }: + return nil + } +} + +func (s *Server) run(ctx context.Context) { + // The server runs until ctx is canceled. + s.done = ctx.Done() + queue := s.queue + + // Shutdown monitor: When the context ends, wait for any active publish + // calls to exit, then close the queue to signal the sender to exit. + go func() { + <-ctx.Done() + s.pubs.Lock() + defer s.pubs.Unlock() + close(s.queue) + s.queue = nil + }() + + s.exited = make(chan struct{}) + go func() { + defer close(s.exited) + + // Sender: Service the queue and forward messages to subscribers. + for it := range queue { + if err := s.send(it.Data, it.Events); err != nil { + s.logger.Error("error sending event", "err", err) + } + } + // Terminate all subscribers before exit. + s.subs.Lock() + defer s.subs.Unlock() + for si := range s.subs.index.all { + si.sub.stop(ErrTerminated) + } + s.subs.index = nil + }() +} + +// removeSubs cancels and removes all the subscriptions in evict with the given +// error. The caller must hold the s.subs lock. +func (s *Server) removeSubs(evict subInfoSet, reason error) { + for si := range evict { + si.sub.stop(reason) + } + s.subs.index.removeAll(evict) +} + +// send delivers the given message to all matching subscribers. An error in +// query matching stops transmission and is returned. +func (s *Server) send(data types.EventData, events []abci.Event) error { + // At exit, evict any subscriptions that were too slow. + evict := make(subInfoSet) + defer func() { + if len(evict) != 0 { + s.subs.Lock() + defer s.subs.Unlock() + s.removeSubs(evict, ErrTerminated) + } + }() + + // N.B. Order is important here. 
We must acquire and defer the lock release + // AFTER deferring the eviction cleanup: The cleanup must happen after the + // reader lock has released, or it will deadlock. + s.subs.RLock() + defer s.subs.RUnlock() + + // If an observer is defined, give it control of the message before + // attempting to deliver it to any matching subscribers. If the observer + // fails, the message will not be forwarded. + if s.subs.observe != nil { + err := s.subs.observe(Message{ + data: data, + events: events, + }) + if err != nil { + return fmt.Errorf("observer failed on message: %w", err) + } + } + + for si := range s.subs.index.all { + if !si.query.Matches(events) { + continue + } + + // Publish the events to the subscriber's queue. If this fails, e.g., + // because the queue is over capacity or out of quota, evict the + // subscription from the index. + if err := si.sub.publish(Message{ + subID: si.sub.id, + data: data, + events: events, + }); err != nil { + evict.add(si) + } + } + + return nil +} diff --git a/internal/pubsub/pubsub_test.go b/internal/pubsub/pubsub_test.go new file mode 100644 index 0000000000..9ba515d708 --- /dev/null +++ b/internal/pubsub/pubsub_test.go @@ -0,0 +1,470 @@ +package pubsub_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +const ( + clientID = "test-client" +) + +// pubstring is a trivial implementation of the EventData interface for +// string-valued test data. 
+type pubstring string + +func (pubstring) TypeTag() string { return "pubstring" } + +func TestSubscribeWithArgs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + t.Run("DefaultLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + })) + + require.Equal(t, 1, s.NumClients()) + require.Equal(t, 1, s.NumClientSubscriptions(clientID)) + + require.NoError(t, s.Publish(ctx, pubstring("Ka-Zar"))) + sub.mustReceive(ctx, pubstring("Ka-Zar")) + }) + t.Run("PositiveLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID + "-2", + Query: query.All, + Limit: 10, + })) + require.NoError(t, s.Publish(ctx, pubstring("Aggamon"))) + sub.mustReceive(ctx, pubstring("Aggamon")) + }) +} + +func TestObserver(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.TestingLogger() + + s := newTestServer(ctx, t, logger) + + done := make(chan struct{}) + var got interface{} + require.NoError(t, s.Observe(ctx, func(msg pubsub.Message) error { + defer close(done) + got = msg.Data() + return nil + })) + + const input = pubstring("Lions and tigers and bears, oh my!") + require.NoError(t, s.Publish(ctx, input)) + <-done + require.Equal(t, got, input) +} + +func TestObserverErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + + s := newTestServer(ctx, t, logger) + + require.Error(t, s.Observe(ctx, nil, query.All)) + require.NoError(t, s.Observe(ctx, func(pubsub.Message) error { return nil })) + require.Error(t, s.Observe(ctx, func(pubsub.Message) error { return nil }, query.All)) +} + +func TestPublishDoesNotBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer 
cancel() + + logger := log.TestingLogger() + + s := newTestServer(ctx, t, logger) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + })) + published := make(chan struct{}) + go func() { + defer close(published) + + require.NoError(t, s.Publish(ctx, pubstring("Quicksilver"))) + require.NoError(t, s.Publish(ctx, pubstring("Asylum"))) + require.NoError(t, s.Publish(ctx, pubstring("Ivan"))) + }() + + select { + case <-published: + sub.mustReceive(ctx, pubstring("Quicksilver")) + sub.mustFail(ctx, pubsub.ErrTerminated) + case <-time.After(3 * time.Second): + t.Fatal("Publishing should not have blocked") + } +} + +func TestSubscribeErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + t.Run("NegativeLimitErr", func(t *testing.T) { + _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + Limit: -5, + }) + require.Error(t, err) + }) +} + +func TestSlowSubscriber(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + })) + + require.NoError(t, s.Publish(ctx, pubstring("Fat Cobra"))) + require.NoError(t, s.Publish(ctx, pubstring("Viper"))) + require.NoError(t, s.Publish(ctx, pubstring("Black Panther"))) + + // We had capacity for one item, so we should get that item, but after that + // the subscription should have been terminated by the publisher. 
+ sub.mustReceive(ctx, pubstring("Fat Cobra")) + sub.mustFail(ctx, pubsub.ErrTerminated) +} + +func TestDifferentClients(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-1", + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} + + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Iceman"), events)) + sub1.mustReceive(ctx, pubstring("Iceman")) + + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-2", + Query: query.MustCompile(`tm.events.type='NewBlock' AND abci.account.name='Igor'`), + })) + + events = []abci.Event{ + { + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }, + { + Type: "abci.account", + Attributes: []abci.EventAttribute{{Key: "name", Value: "Igor"}}, + }, + } + + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Ultimo"), events)) + sub1.mustReceive(ctx, pubstring("Ultimo")) + sub2.mustReceive(ctx, pubstring("Ultimo")) + + sub3 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-3", + Query: query.MustCompile( + `tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10`), + })) + + events = []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, + }} + + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Valeria Richards"), events)) + sub3.mustTimeOut(ctx, 100*time.Millisecond) +} + +func TestSubscribeDuplicateKeys(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + testCases 
:= []struct { + query string + expected types.EventData + }{ + {`withdraw.rewards='17'`, pubstring("Iceman")}, + {`withdraw.rewards='22'`, pubstring("Iceman")}, + {`withdraw.rewards='1' AND withdraw.rewards='22'`, pubstring("Iceman")}, + {`withdraw.rewards='100'`, nil}, + } + + for i, tc := range testCases { + id := fmt.Sprintf("client-%d", i) + q := query.MustCompile(tc.query) + t.Run(id, func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: id, + Query: q, + })) + + events := []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "sender", Value: "bar"}, + {Key: "sender", Value: "baz"}, + }, + }, + { + Type: "withdraw", + Attributes: []abci.EventAttribute{ + {Key: "rewards", Value: "1"}, + {Key: "rewards", Value: "17"}, + {Key: "rewards", Value: "22"}, + }, + }, + } + + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Iceman"), events)) + + if tc.expected != nil { + sub.mustReceive(ctx, tc.expected) + } else { + sub.mustTimeOut(ctx, 100*time.Millisecond) + } + }) + } +} + +func TestClientSubscribesTwice(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + q := query.MustCompile(`tm.events.type='NewBlock'`) + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} + + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + })) + + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Goblin Queen"), events)) + sub1.mustReceive(ctx, pubstring("Goblin Queen")) + + // Subscribing a second time with the same client ID and query fails. 
+ { + sub2, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + }) + require.Error(t, err) + require.Nil(t, sub2) + } + + // The attempt to re-subscribe does not disrupt the existing sub. + require.NoError(t, s.PublishWithEvents(ctx, pubstring("Spider-Man"), events)) + sub1.mustReceive(ctx, pubstring("Spider-Man")) +} + +func TestUnsubscribe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + + // Removing the subscription we just made should succeed. + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + + // Publishing should still work. + require.NoError(t, s.Publish(ctx, pubstring("Nick Fury"))) + + // The unsubscribed subscriber should report as such. 
+ sub.mustFail(ctx, pubsub.ErrUnsubscribed) +} + +func TestClientUnsubscribesTwice(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + require.ErrorIs(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + }), pubsub.ErrSubscriptionNotFound) + require.ErrorIs(t, s.UnsubscribeAll(ctx, clientID), pubsub.ErrSubscriptionNotFound) +} + +func TestResubscribe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + args := pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + } + newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.All, + })) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Publish(ctx, pubstring("Cable"))) + sub.mustReceive(ctx, pubstring("Cable")) +} + +func TestUnsubscribeAll(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.TestingLogger() + s := newTestServer(ctx, t, logger) + + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlockHeader'`), + })) + + require.NoError(t, s.UnsubscribeAll(ctx, clientID)) 
+ require.NoError(t, s.Publish(ctx, pubstring("Nick Fury"))) + + sub1.mustFail(ctx, pubsub.ErrUnsubscribed) + sub2.mustFail(ctx, pubsub.ErrUnsubscribed) + +} + +func TestBufferCapacity(t *testing.T) { + logger := log.TestingLogger() + s := pubsub.NewServer(logger, pubsub.BufferCapacity(2)) + + require.Equal(t, 2, s.BufferCapacity()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, s.Publish(ctx, pubstring("Nighthawk"))) + require.NoError(t, s.Publish(ctx, pubstring("Sage"))) + + ctx, cancel = context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + + require.ErrorIs(t, s.Publish(ctx, pubstring("Ironclad")), context.DeadlineExceeded) +} + +func newTestServer(ctx context.Context, t testing.TB, logger log.Logger) *pubsub.Server { + t.Helper() + + s := pubsub.NewServer(logger) + + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) + return s +} + +type testSub struct { + t testing.TB + *pubsub.Subscription +} + +func newTestSub(t testing.TB) *testSub { return &testSub{t: t} } + +func (s *testSub) must(sub *pubsub.Subscription, err error) *testSub { + s.t.Helper() + require.NoError(s.t, err) + require.NotNil(s.t, sub) + s.Subscription = sub + return s +} + +func (s *testSub) mustReceive(ctx context.Context, want types.EventData) { + s.t.Helper() + got, err := s.Next(ctx) + require.NoError(s.t, err) + require.Equal(s.t, want, got.Data()) +} + +func (s *testSub) mustTimeOut(ctx context.Context, dur time.Duration) { + s.t.Helper() + tctx, cancel := context.WithTimeout(ctx, dur) + defer cancel() + got, err := s.Next(tctx) + if !errors.Is(err, context.DeadlineExceeded) { + s.t.Errorf("Next: got (%+v, %v), want %v", got, err, context.DeadlineExceeded) + } +} + +func (s *testSub) mustFail(ctx context.Context, want error) { + s.t.Helper() + got, err := s.Next(ctx) + if err == nil && want != nil { + s.t.Fatalf("Next: got (%+v, %v), want error %v", got, err, want) + } + require.ErrorIs(s.t, err, want) +} diff --git 
a/internal/pubsub/query/bench_test.go b/internal/pubsub/query/bench_test.go new file mode 100644 index 0000000000..0916e9c8af --- /dev/null +++ b/internal/pubsub/query/bench_test.go @@ -0,0 +1,55 @@ +package query_test + +import ( + "testing" + + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" +) + +const testQuery = `tm.events.type='NewBlock' AND abci.account.name='Igor'` + +var testEvents = []types.Event{ + { + Type: "tm.events", + Attributes: []types.EventAttribute{{ + Key: "index", + Value: "25", + }, { + Key: "type", + Value: "NewBlock", + }}, + }, + { + Type: "abci.account", + Attributes: []types.EventAttribute{{ + Key: "name", + Value: "Anya", + }, { + Key: "name", + Value: "Igor", + }}, + }, +} + +func BenchmarkParseCustom(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := query.New(testQuery) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkMatchCustom(b *testing.B) { + q, err := query.New(testQuery) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !q.Matches(testEvents) { + b.Error("no match") + } + } +} diff --git a/internal/pubsub/query/query.go b/internal/pubsub/query/query.go new file mode 100644 index 0000000000..23510a75d2 --- /dev/null +++ b/internal/pubsub/query/query.go @@ -0,0 +1,322 @@ +// Package query implements the custom query format used to filter event +// subscriptions in Tendermint. +// +// Query expressions describe properties of events and their attributes, using +// strings like: +// +// abci.invoice.number = 22 AND abci.invoice.owner = 'Ivan' +// +// Query expressions can handle attribute values encoding numbers, strings, +// dates, and timestamps. The complete query grammar is described in the +// query/syntax package. 
+// +package query + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" +) + +// All is a query that matches all events. +var All *Query + +// A Query is the compiled form of a query. +type Query struct { + ast syntax.Query + conds []condition +} + +// New parses and compiles the query expression into an executable query. +func New(query string) (*Query, error) { + ast, err := syntax.Parse(query) + if err != nil { + return nil, err + } + return Compile(ast) +} + +// MustCompile compiles the query expression into an executable query. +// In case of error, MustCompile will panic. +// +// This is intended for use in program initialization; use query.New if you +// need to check errors. +func MustCompile(query string) *Query { + q, err := New(query) + if err != nil { + panic(err) + } + return q +} + +// Compile compiles the given query AST so it can be used to match events. +func Compile(ast syntax.Query) (*Query, error) { + conds := make([]condition, len(ast)) + for i, q := range ast { + cond, err := compileCondition(q) + if err != nil { + return nil, fmt.Errorf("compile %s: %w", q, err) + } + conds[i] = cond + } + return &Query{ast: ast, conds: conds}, nil +} + +// Matches reports whether q matches the given events. If q == nil, the query +// matches any non-empty collection of events. +func (q *Query) Matches(events []types.Event) bool { + if q == nil { + return true + } + for _, cond := range q.conds { + if !cond.matchesAny(events) { + return false + } + } + return len(events) != 0 +} + +// String matches part of the pubsub.Query interface. +func (q *Query) String() string { + if q == nil { + return "" + } + return q.ast.String() +} + +// Syntax returns the syntax tree representation of q. +func (q *Query) Syntax() syntax.Query { + if q == nil { + return nil + } + return q.ast +} + +// A condition is a compiled match condition. 
A condition matches an event if +// the event has the designated type, contains an attribute with the given +// name, and the match function returns true for the attribute value. +type condition struct { + tag string // e.g., "tx.hash" + match func(s string) bool +} + +// findAttr returns a slice of attribute values from event matching the +// condition tag, and reports whether the event type strictly equals the +// condition tag. +func (c condition) findAttr(event types.Event) ([]string, bool) { + if !strings.HasPrefix(c.tag, event.Type) { + return nil, false // type does not match tag + } else if len(c.tag) == len(event.Type) { + return nil, true // type == tag + } + var vals []string + for _, attr := range event.Attributes { + fullName := event.Type + "." + attr.Key + if fullName == c.tag { + vals = append(vals, attr.Value) + } + } + return vals, false +} + +// matchesAny reports whether c matches at least one of the given events. +func (c condition) matchesAny(events []types.Event) bool { + for _, event := range events { + if c.matchesEvent(event) { + return true + } + } + return false +} + +// matchesEvent reports whether c matches the given event. +func (c condition) matchesEvent(event types.Event) bool { + vs, tagEqualsType := c.findAttr(event) + if len(vs) == 0 { + // As a special case, a condition tag that exactly matches the event type + // is matched against an empty string. This allows existence checks to + // work for type-only queries. + if tagEqualsType { + return c.match("") + } + return false + } + + // At this point, we have candidate values. + for _, v := range vs { + if c.match(v) { + return true + } + } + return false +} + +func compileCondition(cond syntax.Condition) (condition, error) { + out := condition{tag: cond.Tag} + + // Handle existence checks separately to simplify the logic below for + // comparisons that take arguments. 
+ if cond.Op == syntax.TExists { + out.match = func(string) bool { return true } + return out, nil + } + + // All the other operators require an argument. + if cond.Arg == nil { + return condition{}, fmt.Errorf("missing argument for %v", cond.Op) + } + + // Precompile the argument value matcher. + argType := cond.Arg.Type + var argValue interface{} + + switch argType { + case syntax.TString: + argValue = cond.Arg.Value() + case syntax.TNumber: + argValue = cond.Arg.Number() + case syntax.TTime, syntax.TDate: + argValue = cond.Arg.Time() + default: + return condition{}, fmt.Errorf("unknown argument type %v", argType) + } + + mcons := opTypeMap[cond.Op][argType] + if mcons == nil { + return condition{}, fmt.Errorf("invalid op/arg combination (%v, %v)", cond.Op, argType) + } + out.match = mcons(argValue) + return out, nil +} + +// TODO(creachadair): The existing implementation allows anything number shaped +// to be treated as a number. This preserves the parts of that behavior we had +// tests for, but we should probably get rid of that. +var extractNum = regexp.MustCompile(`^\d+(\.\d+)?`) + +func parseNumber(s string) (float64, error) { + return strconv.ParseFloat(extractNum.FindString(s), 64) +} + +// A map of operator ⇒ argtype ⇒ match-constructor. +// An entry does not exist if the combination is not valid. +// +// Disable the dupl lint for this map. The result isn't even correct. 
+//nolint:dupl +var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string) bool{ + syntax.TContains: { + syntax.TString: func(v interface{}) func(string) bool { + return func(s string) bool { + return strings.Contains(s, v.(string)) + } + }, + }, + syntax.TEq: { + syntax.TString: func(v interface{}) func(string) bool { + return func(s string) bool { return s == v.(string) } + }, + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w == v.(float64) + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && ts.Equal(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && ts.Equal(v.(time.Time)) + } + }, + }, + syntax.TLt: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w < v.(float64) + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && ts.Before(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && ts.Before(v.(time.Time)) + } + }, + }, + syntax.TLeq: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w <= v.(float64) + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && !ts.After(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && !ts.After(v.(time.Time)) + } + 
}, + }, + syntax.TGt: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w > v.(float64) + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && ts.After(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && ts.After(v.(time.Time)) + } + }, + }, + syntax.TGeq: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w >= v.(float64) + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && !ts.Before(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && !ts.Before(v.(time.Time)) + } + }, + }, +} diff --git a/internal/pubsub/query/query_test.go b/internal/pubsub/query/query_test.go new file mode 100644 index 0000000000..fc5fd82f00 --- /dev/null +++ b/internal/pubsub/query/query_test.go @@ -0,0 +1,265 @@ +package query_test + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" +) + +// Example events from the OpenAPI documentation: +// https://github.com/tendermint/tendermint/blob/master/rpc/openapi/openapi.yaml +// +// Redactions: +// +// - Add an explicit "tm" event for the built-in attributes. +// - Remove Index fields (not relevant to tests). +// - Add explicit balance values (to use in tests). 
+// +var apiEvents = []types.Event{ + { + Type: "tm", + Attributes: []types.EventAttribute{ + {Key: "event", Value: "Tx"}, + {Key: "hash", Value: "XYZ"}, + {Key: "height", Value: "5"}, + }, + }, + { + Type: "rewards.withdraw", + Attributes: []types.EventAttribute{ + {Key: "address", Value: "AddrA"}, + {Key: "source", Value: "SrcX"}, + {Key: "amount", Value: "100"}, + {Key: "balance", Value: "1500"}, + }, + }, + { + Type: "rewards.withdraw", + Attributes: []types.EventAttribute{ + {Key: "address", Value: "AddrB"}, + {Key: "source", Value: "SrcY"}, + {Key: "amount", Value: "45"}, + {Key: "balance", Value: "999"}, + }, + }, + { + Type: "transfer", + Attributes: []types.EventAttribute{ + {Key: "sender", Value: "AddrC"}, + {Key: "recipient", Value: "AddrD"}, + {Key: "amount", Value: "160"}, + }, + }, +} + +func TestCompiledMatches(t *testing.T) { + var ( + txDate = "2017-01-01" + txTime = "2018-05-03T14:45:00Z" + ) + + testCases := []struct { + s string + events []types.Event + matches bool + }{ + {`tm.events.type='NewBlock'`, + newTestEvents(`tm|events.type=NewBlock`), + true}, + {`tx.gas > 7`, + newTestEvents(`tx|gas=8`), + true}, + {`transfer.amount > 7`, + newTestEvents(`transfer|amount=8stake`), + true}, + {`transfer.amount > 7`, + newTestEvents(`transfer|amount=8.045`), + true}, + {`transfer.amount > 7.043`, + newTestEvents(`transfer|amount=8.045stake`), + true}, + {`transfer.amount > 8.045`, + newTestEvents(`transfer|amount=8.045stake`), + false}, + {`tx.gas > 7 AND tx.gas < 9`, + newTestEvents(`tx|gas=8`), + true}, + {`body.weight >= 3.5`, + newTestEvents(`body|weight=3.5`), + true}, + {`account.balance < 1000.0`, + newTestEvents(`account|balance=900`), + true}, + {`apples.kg <= 4`, + newTestEvents(`apples|kg=4.0`), + true}, + {`body.weight >= 4.5`, + newTestEvents(`body|weight=4.5`), + true}, + {`oranges.kg < 4 AND watermellons.kg > 10`, + newTestEvents(`oranges|kg=3`, `watermellons|kg=12`), + true}, + {`peaches.kg < 4`, + newTestEvents(`peaches|kg=5`), + 
false}, + {`tx.date > DATE 2017-01-01`, + newTestEvents(`tx|date=` + time.Now().Format(syntax.DateFormat)), + true}, + {`tx.date = DATE 2017-01-01`, + newTestEvents(`tx|date=` + txDate), + true}, + {`tx.date = DATE 2018-01-01`, + newTestEvents(`tx|date=` + txDate), + false}, + {`tx.time >= TIME 2013-05-03T14:45:00Z`, + newTestEvents(`tx|time=` + time.Now().Format(syntax.TimeFormat)), + true}, + {`tx.time = TIME 2013-05-03T14:45:00Z`, + newTestEvents(`tx|time=` + txTime), + false}, + {`abci.owner.name CONTAINS 'Igor'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name CONTAINS 'Igor'`, + newTestEvents(`abci|owner.name=Pavel|owner.name=Ivan`), + false}, + {`abci.owner.name = 'Igor'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name = 'Ivan'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name = 'Ivan' AND abci.owner.name = 'John'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + false}, + {`tm.events.type='NewBlock'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + true}, + {`app.name = 'fuzzed'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + true}, + {`tm.events.type='NewBlock' AND app.name = 'fuzzed'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + true}, + {`tm.events.type='NewHeader' AND app.name = 'fuzzed'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + false}, + {`slash EXISTS`, + newTestEvents(`slash|reason=missing_signature|power=6000`), + true}, + {`slash EXISTS`, + newTestEvents(`transfer|recipient=cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz|sender=cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5`), + false}, + {`slash.reason EXISTS AND slash.power > 1000`, + newTestEvents(`slash|reason=missing_signature|power=6000`), + true}, + {`slash.reason 
EXISTS AND slash.power > 1000`, + newTestEvents(`slash|reason=missing_signature|power=500`), + false}, + {`slash.reason EXISTS`, + newTestEvents(`transfer|recipient=cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz|sender=cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5`), + false}, + + // Test cases based on the OpenAPI examples. + {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'`, + apiEvents, true}, + {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'SrcY'`, + apiEvents, true}, + {`tm.event = 'Tx' AND transfer.sender = 'AddrA'`, + apiEvents, false}, + {`tm.event = 'Tx' AND transfer.sender = 'AddrC'`, + apiEvents, true}, + {`tm.event = 'Tx' AND transfer.sender = 'AddrZ'`, + apiEvents, false}, + {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'`, + apiEvents, false}, + {`tm.event = 'Tx' AND rewards.withdraw.source = 'W'`, + apiEvents, false}, + } + + // NOTE: The original implementation allowed arbitrary prefix matches on + // attribute tags, e.g., "sl" would match "slash". + // + // That is weird and probably wrong: "foo.ba" should not match "foo.bar", + // or there is no way to distinguish the case where there were two values + // for "foo.bar" or one value each for "foo.ba" and "foo.bar". + // + // Apart from a single test case, I could not find any attested usage of + // this implementation detail. It isn't documented in the OpenAPI docs and + // is not shown in any of the example inputs. + // + // On that basis, I removed that test case. This implementation still does + // correctly handle variable type/attribute splits ("x", "y.z" / "x.y", "z") + // since that was required by the original "flattened" event representation. 
+ + for i, tc := range testCases { + t.Run(fmt.Sprintf("%02d", i+1), func(t *testing.T) { + c, err := query.New(tc.s) + if err != nil { + t.Fatalf("NewCompiled %#q: unexpected error: %v", tc.s, err) + } + + got := c.Matches(tc.events) + if got != tc.matches { + t.Errorf("Query: %#q\nInput: %+v\nMatches: got %v, want %v", + tc.s, tc.events, got, tc.matches) + } + }) + } +} + +func TestAllMatchesAll(t *testing.T) { + events := newTestEvents( + ``, + `Asher|Roth=`, + `Route|66=`, + `Rilly|Blue=`, + ) + for i := 0; i < len(events); i++ { + if !query.All.Matches(events[:i]) { + t.Errorf("Did not match on %+v ", events[:i]) + } + } +} + +// newTestEvent constructs an Event message from a template string. +// The format is "type|attr1=val1|attr2=val2|...". +func newTestEvent(s string) types.Event { + var event types.Event + parts := strings.Split(s, "|") + event.Type = parts[0] + if len(parts) == 1 { + return event // type only, no attributes + } + for _, kv := range parts[1:] { + key, val := splitKV(kv) + event.Attributes = append(event.Attributes, types.EventAttribute{ + Key: key, + Value: val, + }) + } + return event +} + +// newTestEvents constructs a slice of Event messages by applying newTestEvent +// to each element of ss. +func newTestEvents(ss ...string) []types.Event { + events := make([]types.Event, len(ss)) + for i, s := range ss { + events[i] = newTestEvent(s) + } + return events +} + +func splitKV(s string) (key, value string) { + kv := strings.SplitN(s, "=", 2) + return kv[0], kv[1] +} diff --git a/internal/pubsub/query/syntax/doc.go b/internal/pubsub/query/syntax/doc.go new file mode 100644 index 0000000000..e7a9896c44 --- /dev/null +++ b/internal/pubsub/query/syntax/doc.go @@ -0,0 +1,34 @@ +// Package syntax defines a scanner and parser for the Tendermint event filter +// query language. A query selects events by their types and attribute values. 
+// +// Grammar +// +// The grammar of the query language is defined by the following EBNF: +// +// query = conditions EOF +// conditions = condition {"AND" condition} +// condition = tag comparison +// comparison = equal / order / contains / "EXISTS" +// equal = "=" (date / number / time / value) +// order = cmp (date / number / time) +// contains = "CONTAINS" value +// cmp = "<" / "<=" / ">" / ">=" +// +// The lexical terms are defined here using RE2 regular expression notation: +// +// // The name of an event attribute (type.value) +// tag = #'\w+(\.\w+)*' +// +// // A datestamp (YYYY-MM-DD) +// date = #'DATE \d{4}-\d{2}-\d{2}' +// +// // A number with optional fractional parts (0, 10, 3.25) +// number = #'\d+(\.\d+)?' +// +// // An RFC3339 timestamp (2021-11-23T22:04:19-09:00) +// time = #'TIME \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}([-+]\d{2}:\d{2}|Z)' +// +// // A quoted literal string value ('a b c') +// value = #'\'[^\']*\'' +// +package syntax diff --git a/internal/pubsub/query/syntax/parser.go b/internal/pubsub/query/syntax/parser.go new file mode 100644 index 0000000000..a100ec79c7 --- /dev/null +++ b/internal/pubsub/query/syntax/parser.go @@ -0,0 +1,213 @@ +package syntax + +import ( + "fmt" + "io" + "math" + "strconv" + "strings" + "time" +) + +// Parse parses the specified query string. It is shorthand for constructing a +// parser for s and calling its Parse method. +func Parse(s string) (Query, error) { + return NewParser(strings.NewReader(s)).Parse() +} + +// Query is the root of the parse tree for a query. A query is the conjunction +// of one or more conditions. +type Query []Condition + +func (q Query) String() string { + ss := make([]string, len(q)) + for i, cond := range q { + ss[i] = cond.String() + } + return strings.Join(ss, " AND ") +} + +// A Condition is a single conditional expression, consisting of a tag, a +// comparison operator, and an optional argument. The type of the argument +// depends on the operator. 
+type Condition struct { + Tag string + Op Token + Arg *Arg + + opText string +} + +func (c Condition) String() string { + s := c.Tag + " " + c.opText + if c.Arg != nil { + return s + " " + c.Arg.String() + } + return s +} + +// An Arg is the argument of a comparison operator. +type Arg struct { + Type Token + text string +} + +func (a *Arg) String() string { + if a == nil { + return "" + } + switch a.Type { + case TString: + return "'" + a.text + "'" + case TTime: + return "TIME " + a.text + case TDate: + return "DATE " + a.text + default: + return a.text + } +} + +// Number returns the value of the argument text as a number, or a NaN if the +// text does not encode a valid number value. +func (a *Arg) Number() float64 { + if a == nil { + return -1 + } + v, err := strconv.ParseFloat(a.text, 64) + if err == nil && v >= 0 { + return v + } + return math.NaN() +} + +// Time returns the value of the argument text as a time, or the zero value if +// the text does not encode a timestamp or datestamp. +func (a *Arg) Time() time.Time { + var ts time.Time + if a == nil { + return ts + } + var err error + switch a.Type { + case TDate: + ts, err = ParseDate(a.text) + case TTime: + ts, err = ParseTime(a.text) + } + if err == nil { + return ts + } + return time.Time{} +} + +// Value returns the value of the argument text as a string, or "". +func (a *Arg) Value() string { + if a == nil { + return "" + } + return a.text +} + +// Parser is a query expression parser. The grammar for query expressions is +// defined in the syntax package documentation. +type Parser struct { + scanner *Scanner +} + +// NewParser constructs a new parser that reads the input from r. +func NewParser(r io.Reader) *Parser { + return &Parser{scanner: NewScanner(r)} +} + +// Parse parses the complete input and returns the resulting query. 
+func (p *Parser) Parse() (Query, error) { + cond, err := p.parseCond() + if err != nil { + return nil, err + } + conds := []Condition{cond} + for p.scanner.Next() != io.EOF { + if tok := p.scanner.Token(); tok != TAnd { + return nil, fmt.Errorf("offset %d: got %v, want %v", p.scanner.Pos(), tok, TAnd) + } + cond, err := p.parseCond() + if err != nil { + return nil, err + } + conds = append(conds, cond) + } + return conds, nil +} + +// parseCond parses a conditional expression: tag OP value. +func (p *Parser) parseCond() (Condition, error) { + var cond Condition + if err := p.require(TTag); err != nil { + return cond, err + } + cond.Tag = p.scanner.Text() + if err := p.require(TLeq, TGeq, TLt, TGt, TEq, TContains, TExists); err != nil { + return cond, err + } + cond.Op = p.scanner.Token() + cond.opText = p.scanner.Text() + + var err error + switch cond.Op { + case TLeq, TGeq, TLt, TGt: + err = p.require(TNumber, TTime, TDate) + case TEq: + err = p.require(TNumber, TTime, TDate, TString) + case TContains: + err = p.require(TString) + case TExists: + // no argument + return cond, nil + default: + return cond, fmt.Errorf("offset %d: unexpected operator %v", p.scanner.Pos(), cond.Op) + } + if err != nil { + return cond, err + } + cond.Arg = &Arg{Type: p.scanner.Token(), text: p.scanner.Text()} + return cond, nil +} + +// require advances the scanner and requires that the resulting token is one of +// the specified token types. +func (p *Parser) require(tokens ...Token) error { + if err := p.scanner.Next(); err != nil { + return fmt.Errorf("offset %d: %w", p.scanner.Pos(), err) + } + got := p.scanner.Token() + for _, tok := range tokens { + if tok == got { + return nil + } + } + return fmt.Errorf("offset %d: got %v, wanted %s", p.scanner.Pos(), got, tokLabel(tokens)) +} + +// tokLabel makes a human-readable summary string for the given token types. 
+func tokLabel(tokens []Token) string { + if len(tokens) == 1 { + return tokens[0].String() + } + last := len(tokens) - 1 + ss := make([]string, len(tokens)-1) + for i, tok := range tokens[:last] { + ss[i] = tok.String() + } + return strings.Join(ss, ", ") + " or " + tokens[last].String() +} + +// ParseDate parses s as a date string in the format used by DATE values. +func ParseDate(s string) (time.Time, error) { + return time.Parse("2006-01-02", s) +} + +// ParseTime parses s as a timestamp in the format used by TIME values. +func ParseTime(s string) (time.Time, error) { + return time.Parse(time.RFC3339, s) +} diff --git a/internal/pubsub/query/syntax/scanner.go b/internal/pubsub/query/syntax/scanner.go new file mode 100644 index 0000000000..332e3f7b14 --- /dev/null +++ b/internal/pubsub/query/syntax/scanner.go @@ -0,0 +1,312 @@ +package syntax + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + "time" + "unicode" +) + +// Token is the type of a lexical token in the query grammar. +type Token byte + +const ( + TInvalid = iota // invalid or unknown token + TTag // field tag: x.y + TString // string value: 'foo bar' + TNumber // number: 0, 15.5, 100 + TTime // timestamp: TIME yyyy-mm-ddThh:mm:ss([-+]hh:mm|Z) + TDate // datestamp: DATE yyyy-mm-dd + TAnd // operator: AND + TContains // operator: CONTAINS + TExists // operator: EXISTS + TEq // operator: = + TLt // operator: < + TLeq // operator: <= + TGt // operator: > + TGeq // operator: >= + + // Do not reorder these values without updating the scanner code. 
+)
+
+var tString = [...]string{
+ TInvalid: "invalid token",
+ TTag: "tag",
+ TString: "string",
+ TNumber: "number",
+ TTime: "timestamp",
+ TDate: "datestamp",
+ TAnd: "AND operator",
+ TContains: "CONTAINS operator",
+ TExists: "EXISTS operator",
+ TEq: "= operator",
+ TLt: "< operator",
+ TLeq: "<= operator",
+ TGt: "> operator",
+ TGeq: ">= operator",
+}
+
+func (t Token) String() string {
+ v := int(t)
+ if v >= len(tString) {
+ return "unknown token type"
+ }
+ return tString[v]
+}
+
+const (
+ // TimeFormat is the format string used for timestamp values.
+ TimeFormat = time.RFC3339
+
+ // DateFormat is the format string used for datestamp values.
+ DateFormat = "2006-01-02"
+)
+
+// Scanner reads lexical tokens of the query language from an input stream.
+// Each call to Next advances the scanner to the next token, or reports an
+// error.
+type Scanner struct {
+ r *bufio.Reader
+ buf bytes.Buffer
+ tok Token
+ err error
+
+ pos, last, end int
+}
+
+// NewScanner constructs a new scanner that reads from r.
+func NewScanner(r io.Reader) *Scanner { return &Scanner{r: bufio.NewReader(r)} }
+
+// Next advances s to the next token in the input, or reports an error. At the
+// end of input, Next returns io.EOF.
+func (s *Scanner) Next() error {
+ s.buf.Reset()
+ s.pos = s.end
+ s.tok = TInvalid
+ s.err = nil
+
+ for {
+ ch, err := s.rune()
+ if err != nil {
+ return s.fail(err)
+ }
+ if unicode.IsSpace(ch) {
+ s.pos = s.end
+ continue // skip whitespace
+ }
+ if '0' <= ch && ch <= '9' {
+ return s.scanNumber(ch)
+ } else if isTagRune(ch) {
+ return s.scanTagLike(ch)
+ }
+ switch ch {
+ case '\'':
+ return s.scanString(ch)
+ case '<', '>', '=':
+ return s.scanCompare(ch)
+ default:
+ return s.invalid(ch)
+ }
+ }
+}
+
+// Token returns the type of the current input token.
+func (s *Scanner) Token() Token { return s.tok }
+
+// Text returns the text of the current input token. 
+func (s *Scanner) Text() string { return s.buf.String() } + +// Pos returns the start offset of the current token in the input. +func (s *Scanner) Pos() int { return s.pos } + +// Err returns the last error reported by Next, if any. +func (s *Scanner) Err() error { return s.err } + +// scanNumber scans for numbers with optional fractional parts. +// Examples: 0, 1, 3.14 +func (s *Scanner) scanNumber(first rune) error { + s.buf.WriteRune(first) + if err := s.scanWhile(isDigit); err != nil { + return err + } + + ch, err := s.rune() + if err != nil && err != io.EOF { + return err + } + if ch == '.' { + s.buf.WriteRune(ch) + if err := s.scanWhile(isDigit); err != nil { + return err + } + } else { + s.unrune() + } + s.tok = TNumber + return nil +} + +func (s *Scanner) scanString(first rune) error { + // discard opening quote + for { + ch, err := s.rune() + if err != nil { + return s.fail(err) + } else if ch == first { + // discard closing quote + s.tok = TString + return nil + } + s.buf.WriteRune(ch) + } +} + +func (s *Scanner) scanCompare(first rune) error { + s.buf.WriteRune(first) + switch first { + case '=': + s.tok = TEq + return nil + case '<': + s.tok = TLt + case '>': + s.tok = TGt + default: + return s.invalid(first) + } + + ch, err := s.rune() + if err == io.EOF { + return nil // the assigned token is correct + } else if err != nil { + return s.fail(err) + } + if ch == '=' { + s.buf.WriteRune(ch) + s.tok++ // depends on token order + return nil + } + s.unrune() + return nil +} + +func (s *Scanner) scanTagLike(first rune) error { + s.buf.WriteRune(first) + var hasSpace bool + for { + ch, err := s.rune() + if err == io.EOF { + break + } else if err != nil { + return s.fail(err) + } + if !isTagRune(ch) { + hasSpace = ch == ' ' // to check for TIME, DATE + break + } + s.buf.WriteRune(ch) + } + + text := s.buf.String() + switch text { + case "TIME": + if hasSpace { + return s.scanTimestamp() + } + s.tok = TTag + case "DATE": + if hasSpace { + return 
s.scanDatestamp() + } + s.tok = TTag + case "AND": + s.tok = TAnd + case "EXISTS": + s.tok = TExists + case "CONTAINS": + s.tok = TContains + default: + s.tok = TTag + } + s.unrune() + return nil +} + +func (s *Scanner) scanTimestamp() error { + s.buf.Reset() // discard "TIME" label + if err := s.scanWhile(isTimeRune); err != nil { + return err + } + if ts, err := time.Parse(TimeFormat, s.buf.String()); err != nil { + return s.fail(fmt.Errorf("invalid TIME value: %w", err)) + } else if y := ts.Year(); y < 1900 || y > 2999 { + return s.fail(fmt.Errorf("timestamp year %d out of range", ts.Year())) + } + s.tok = TTime + return nil +} + +func (s *Scanner) scanDatestamp() error { + s.buf.Reset() // discard "DATE" label + if err := s.scanWhile(isDateRune); err != nil { + return err + } + if ts, err := time.Parse(DateFormat, s.buf.String()); err != nil { + return s.fail(fmt.Errorf("invalid DATE value: %w", err)) + } else if y := ts.Year(); y < 1900 || y > 2999 { + return s.fail(fmt.Errorf("datestamp year %d out of range", ts.Year())) + } + s.tok = TDate + return nil +} + +func (s *Scanner) scanWhile(ok func(rune) bool) error { + for { + ch, err := s.rune() + if err == io.EOF { + return nil + } else if err != nil { + return s.fail(err) + } else if !ok(ch) { + s.unrune() + return nil + } + s.buf.WriteRune(ch) + } +} + +func (s *Scanner) rune() (rune, error) { + ch, nb, err := s.r.ReadRune() + s.last = nb + s.end += nb + return ch, err +} + +func (s *Scanner) unrune() { + _ = s.r.UnreadRune() + s.end -= s.last +} + +func (s *Scanner) fail(err error) error { + s.err = err + return err +} + +func (s *Scanner) invalid(ch rune) error { + return s.fail(fmt.Errorf("invalid input %c at offset %d", ch, s.end)) +} + +func isDigit(r rune) bool { return '0' <= r && r <= '9' } + +func isTagRune(r rune) bool { + return r == '.' 
|| r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} + +func isTimeRune(r rune) bool { + return strings.ContainsRune("-T:+Z", r) || isDigit(r) +} + +func isDateRune(r rune) bool { return isDigit(r) || r == '-' } diff --git a/internal/pubsub/query/syntax/syntax_test.go b/internal/pubsub/query/syntax/syntax_test.go new file mode 100644 index 0000000000..9ce5fa7352 --- /dev/null +++ b/internal/pubsub/query/syntax/syntax_test.go @@ -0,0 +1,190 @@ +package syntax_test + +import ( + "io" + "reflect" + "strings" + "testing" + + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" +) + +func TestScanner(t *testing.T) { + tests := []struct { + input string + want []syntax.Token + }{ + // Empty inputs + {"", nil}, + {" ", nil}, + {"\t\n ", nil}, + + // Numbers + {`0 123`, []syntax.Token{syntax.TNumber, syntax.TNumber}}, + {`0.32 3.14`, []syntax.Token{syntax.TNumber, syntax.TNumber}}, + + // Tags + {`foo foo.bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, + + // Strings (values) + {` '' x 'x' 'x y'`, []syntax.Token{syntax.TString, syntax.TTag, syntax.TString, syntax.TString}}, + {` 'you are not your job' `, []syntax.Token{syntax.TString}}, + + // Comparison operators + {`< <= = > >=`, []syntax.Token{ + syntax.TLt, syntax.TLeq, syntax.TEq, syntax.TGt, syntax.TGeq, + }}, + + // Mixed values of various kinds. 
+ {`x AND y`, []syntax.Token{syntax.TTag, syntax.TAnd, syntax.TTag}},
+ {`x.y CONTAINS 'z'`, []syntax.Token{syntax.TTag, syntax.TContains, syntax.TString}},
+ {`foo EXISTS`, []syntax.Token{syntax.TTag, syntax.TExists}},
+ {`and AND`, []syntax.Token{syntax.TTag, syntax.TAnd}},
+
+ // Timestamp
+ {`TIME 2021-11-23T15:16:17Z`, []syntax.Token{syntax.TTime}},
+
+ // Datestamp
+ {`DATE 2021-11-23`, []syntax.Token{syntax.TDate}},
+ }
+
+ for _, test := range tests {
+ s := syntax.NewScanner(strings.NewReader(test.input))
+ var got []syntax.Token
+ for s.Next() == nil {
+ got = append(got, s.Token())
+ }
+ if err := s.Err(); err != io.EOF {
+ t.Errorf("Next: unexpected error: %v", err)
+ }
+
+ if !reflect.DeepEqual(got, test.want) {
+ t.Logf("Scanner input: %q", test.input)
+ t.Errorf("Wrong tokens:\ngot: %+v\nwant: %+v", got, test.want)
+ }
+ }
+}
+
+func TestScannerErrors(t *testing.T) {
+ tests := []struct {
+ input string
+ }{
+ {`'incomplete string`},
+ {`-23`},
+ {`&`},
+ {`DATE xyz-pdq`},
+ {`DATE xyzp-dq-zv`},
+ {`DATE 0000-00-00`},
+ {`DATE 0000-00-000`},
+ {`DATE 2021-01-99`},
+ {`TIME 2021-01-01T34:56:78Z`},
+ {`TIME 2021-01-99T14:56:08Z`},
+ {`TIME 2021-01-99T34:56:08`},
+ {`TIME 2021-01-99T34:56:11+3`},
+ }
+ for _, test := range tests {
+ s := syntax.NewScanner(strings.NewReader(test.input))
+ if err := s.Next(); err == nil {
+ t.Errorf("Next: got %v (%#q), want error", s.Token(), s.Text())
+ }
+ }
+}
+
+// These parser tests were copied from the original implementation of the query
+// parser, and are preserved here as a compatibility check. 
+func TestParseValid(t *testing.T) { + tests := []struct { + input string + valid bool + }{ + {"tm.events.type='NewBlock'", true}, + {"tm.events.type = 'NewBlock'", true}, + {"tm.events.name = ''", true}, + {"tm.events.type='TIME'", true}, + {"tm.events.type='DATE'", true}, + {"tm.events.type='='", true}, + {"tm.events.type='TIME", false}, + {"tm.events.type=TIME'", false}, + {"tm.events.type==", false}, + {"tm.events.type=NewBlock", false}, + {">==", false}, + {"tm.events.type 'NewBlock' =", false}, + {"tm.events.type>'NewBlock'", false}, + {"", false}, + {"=", false}, + {"='NewBlock'", false}, + {"tm.events.type=", false}, + + {"tm.events.typeNewBlock", false}, + {"tm.events.type'NewBlock'", false}, + {"'NewBlock'", false}, + {"NewBlock", false}, + {"", false}, + + {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, + {"tm.events.type='NewBlock' AND", false}, + {"tm.events.type='NewBlock' AN", false}, + {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, + {"AND tm.events.type='NewBlock' ", false}, + + {"abci.account.name CONTAINS 'Igor'", true}, + + {"tx.date > DATE 2013-05-03", true}, + {"tx.date < DATE 2013-05-03", true}, + {"tx.date <= DATE 2013-05-03", true}, + {"tx.date >= DATE 2013-05-03", true}, + {"tx.date >= DAT 2013-05-03", false}, + {"tx.date <= DATE2013-05-03", false}, + {"tx.date <= DATE -05-03", false}, + {"tx.date >= DATE 20130503", false}, + {"tx.date >= DATE 2013+01-03", false}, + // incorrect year, month, day + {"tx.date >= DATE 0013-01-03", false}, + {"tx.date >= DATE 2013-31-03", false}, + {"tx.date >= DATE 2013-01-83", false}, + + {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, + {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, + {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME2013-05-03T14:45:00Z", false}, + {"tx.date = IME 2013-05-03T14:45:00Z", false}, + {"tx.date = TIME 2013-05-:45:00Z", false}, + {"tx.date >= TIME 
2013-05-03T14:45:00", false}, + {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, + {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, + + {"account.balance=100", true}, + {"account.balance >= 200", true}, + {"account.balance >= -300", false}, + {"account.balance >>= 400", false}, + {"account.balance=33.22.1", false}, + + {"slashing.amount EXISTS", true}, + {"slashing.amount EXISTS AND account.balance=100", true}, + {"account.balance=100 AND slashing.amount EXISTS", true}, + {"slashing EXISTS", true}, + + {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, + {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, + } + + for _, test := range tests { + q, err := syntax.Parse(test.input) + if test.valid != (err == nil) { + t.Errorf("Parse %#q: valid %v got err=%v", test.input, test.valid, err) + } + + // For valid queries, check that the query round-trips. + if test.valid { + qstr := q.String() + r, err := syntax.Parse(qstr) + if err != nil { + t.Errorf("Reparse %#q failed: %v", qstr, err) + } + if rstr := r.String(); rstr != qstr { + t.Errorf("Reparse diff\nold: %#q\nnew: %#q", qstr, rstr) + } + } + } +} diff --git a/internal/pubsub/subindex.go b/internal/pubsub/subindex.go new file mode 100644 index 0000000000..eadb193af2 --- /dev/null +++ b/internal/pubsub/subindex.go @@ -0,0 +1,117 @@ +package pubsub + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/types" +) + +// An item to be published to subscribers. +type item struct { + Data types.EventData + Events []abci.Event +} + +// A subInfo value records a single subscription. +type subInfo struct { + clientID string // chosen by the client + query *query.Query // chosen by the client + subID string // assigned at registration + sub *Subscription // receives published events +} + +// A subInfoSet is an unordered set of subscription info records. 
+type subInfoSet map[*subInfo]struct{} + +func (s subInfoSet) contains(si *subInfo) bool { _, ok := s[si]; return ok } +func (s subInfoSet) add(si *subInfo) { s[si] = struct{}{} } +func (s subInfoSet) remove(si *subInfo) { delete(s, si) } + +// withQuery returns the subset of s whose query string matches qs. +func (s subInfoSet) withQuery(qs string) subInfoSet { + out := make(subInfoSet) + for si := range s { + if si.query.String() == qs { + out.add(si) + } + } + return out +} + +// A subIndex is an indexed collection of subscription info records. +// The index is not safe for concurrent use without external synchronization. +type subIndex struct { + all subInfoSet // all subscriptions + byClient map[string]subInfoSet // per-client subscriptions + byQuery map[string]subInfoSet // per-query subscriptions + + // TODO(creachadair): We allow indexing by query to support existing use by + // the RPC service methods for event streaming. Fix up those methods not to + // require this, and then remove indexing by query. +} + +// newSubIndex constructs a new, empty subscription index. +func newSubIndex() *subIndex { + return &subIndex{ + all: make(subInfoSet), + byClient: make(map[string]subInfoSet), + byQuery: make(map[string]subInfoSet), + } +} + +// findClients returns the set of subscriptions for the given client ID, or nil. +func (idx *subIndex) findClientID(id string) subInfoSet { return idx.byClient[id] } + +// findQuery returns the set of subscriptions on the given query string, or nil. +func (idx *subIndex) findQuery(qs string) subInfoSet { return idx.byQuery[qs] } + +// contains reports whether idx contains any subscription matching the given +// client ID and query pair. 
+func (idx *subIndex) contains(clientID, query string) bool { + csubs, qsubs := idx.byClient[clientID], idx.byQuery[query] + if len(csubs) == 0 || len(qsubs) == 0 { + return false + } + for si := range csubs { + if qsubs.contains(si) { + return true + } + } + return false +} + +// add adds si to the index, replacing any previous entry with the same terms. +// It is the caller's responsibility to check for duplicates before adding. +// See also the contains method. +func (idx *subIndex) add(si *subInfo) { + idx.all.add(si) + if m := idx.byClient[si.clientID]; m == nil { + idx.byClient[si.clientID] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } + qs := si.query.String() + if m := idx.byQuery[qs]; m == nil { + idx.byQuery[qs] = subInfoSet{si: struct{}{}} + } else { + m.add(si) + } +} + +// removeAll removes all the elements of s from the index. +func (idx *subIndex) removeAll(s subInfoSet) { + for si := range s { + idx.all.remove(si) + idx.byClient[si.clientID].remove(si) + if len(idx.byClient[si.clientID]) == 0 { + delete(idx.byClient, si.clientID) + } + if si.query != nil { + qs := si.query.String() + idx.byQuery[qs].remove(si) + if len(idx.byQuery[qs]) == 0 { + delete(idx.byQuery, qs) + } + } + } +} diff --git a/internal/pubsub/subscription.go b/internal/pubsub/subscription.go new file mode 100644 index 0000000000..933e62e1cc --- /dev/null +++ b/internal/pubsub/subscription.go @@ -0,0 +1,89 @@ +package pubsub + +import ( + "context" + "errors" + + "github.com/google/uuid" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/libs/queue" + "github.com/tendermint/tendermint/types" +) + +var ( + // ErrUnsubscribed is returned by Next when the client has unsubscribed. + ErrUnsubscribed = errors.New("subscription removed by client") + + // ErrTerminated is returned by Next when the subscription was terminated by + // the publisher. 
+ ErrTerminated = errors.New("subscription terminated by publisher") +) + +// A Subscription represents a client subscription for a particular query. +type Subscription struct { + id string + queue *queue.Queue // open until the subscription ends + stopErr error // after queue is closed, the reason why +} + +// newSubscription returns a new subscription with the given queue capacity. +func newSubscription(quota, limit int) (*Subscription, error) { + queue, err := queue.New(queue.Options{ + SoftQuota: quota, + HardLimit: limit, + }) + if err != nil { + return nil, err + } + return &Subscription{ + id: uuid.NewString(), + queue: queue, + }, nil +} + +// Next blocks until a message is available, ctx ends, or the subscription +// ends. Next returns ErrUnsubscribed if s was unsubscribed, ErrTerminated if +// s was terminated by the publisher, or a context error if ctx ended without a +// message being available. +func (s *Subscription) Next(ctx context.Context) (Message, error) { + next, err := s.queue.Wait(ctx) + if errors.Is(err, queue.ErrQueueClosed) { + return Message{}, s.stopErr + } else if err != nil { + return Message{}, err + } + return next.(Message), nil +} + +// ID returns the unique subscription identifier for s. +func (s *Subscription) ID() string { return s.id } + +// publish transmits msg to the subscriber. It reports a queue error if the +// queue cannot accept any further messages. +func (s *Subscription) publish(msg Message) error { return s.queue.Add(msg) } + +// stop terminates the subscription with the given error reason. +func (s *Subscription) stop(err error) { + if err == nil { + panic("nil stop error") + } + s.stopErr = err + s.queue.Close() +} + +// Message glues data and events together. +type Message struct { + subID string + data types.EventData + events []abci.Event +} + +// SubscriptionID returns the unique identifier for the subscription +// that produced this message. 
+func (msg Message) SubscriptionID() string { return msg.subID } + +// Data returns an original data published. +func (msg Message) Data() types.EventData { return msg.data } + +// Events returns events, which matched the client's query. +func (msg Message) Events() []abci.Event { return msg.events } diff --git a/internal/rpc/core/abci.go b/internal/rpc/core/abci.go index 06c0330509..cbd27a09dc 100644 --- a/internal/rpc/core/abci.go +++ b/internal/rpc/core/abci.go @@ -1,23 +1,24 @@ package core import ( + "context" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // ABCIQuery queries the application for some information. // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_query func (env *Environment) ABCIQuery( - ctx *rpctypes.Context, + ctx context.Context, path string, data bytes.HexBytes, height int64, prove bool, ) (*coretypes.ResultABCIQuery, error) { - resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{ + resQuery, err := env.ProxyAppQuery.Query(ctx, abci.RequestQuery{ Path: path, Data: data, Height: height, @@ -32,8 +33,8 @@ func (env *Environment) ABCIQuery( // ABCIInfo gets some info about the application. 
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info -func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { - resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo) +func (env *Environment) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + resInfo, err := env.ProxyAppQuery.Info(ctx, proxy.RequestInfo) if err != nil { return nil, err } diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 26472fab4b..9f6c872ca0 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -1,15 +1,15 @@ package core import ( + "context" "fmt" "sort" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -24,7 +24,7 @@ import ( // // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain func (env *Environment) BlockchainInfo( - ctx *rpctypes.Context, + ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -51,7 +51,8 @@ func (env *Environment) BlockchainInfo( return &coretypes.ResultBlockchainInfo{ LastHeight: env.BlockStore.Height(), - BlockMetas: blockMetas}, nil + BlockMetas: blockMetas, + }, nil } // error if either min or max are negative or min > max @@ -91,7 +92,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. 
// More: https://docs.tendermint.com/master/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlock, error) { +func (env *Environment) Block(ctx context.Context, heightPtr *int64) (*coretypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -108,7 +109,7 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretyp // BlockByHash gets block by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { +func (env *Environment) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { // N.B. The hash parameter is HexBytes so that the reflective parameter // decoding logic in the HTTP service will correctly translate from JSON. // See https://github.com/tendermint/tendermint/issues/6802 for context. @@ -122,10 +123,42 @@ func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } +// Header gets block header at a given height. +// If no height is provided, it will fetch the latest header. +// More: https://docs.tendermint.com/master/rpc/#/Info/header +func (env *Environment) Header(ctx context.Context, heightPtr *int64) (*coretypes.ResultHeader, error) { + height, err := env.getHeight(env.BlockStore.Height(), heightPtr) + if err != nil { + return nil, err + } + + blockMeta := env.BlockStore.LoadBlockMeta(height) + if blockMeta == nil { + return &coretypes.ResultHeader{}, nil + } + + return &coretypes.ResultHeader{Header: &blockMeta.Header}, nil +} + +// HeaderByHash gets header by hash. 
+// More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash +func (env *Environment) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { + // N.B. The hash parameter is HexBytes so that the reflective parameter + // decoding logic in the HTTP service will correctly translate from JSON. + // See https://github.com/tendermint/tendermint/issues/6802 for context. + + blockMeta := env.BlockStore.LoadBlockMetaByHash(hash) + if blockMeta == nil { + return &coretypes.ResultHeader{}, nil + } + + return &coretypes.ResultHeader{Header: &blockMeta.Header}, nil +} + // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultCommit, error) { +func (env *Environment) Commit(ctx context.Context, heightPtr *int64) (*coretypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -163,7 +196,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*corety // Thus response.results.deliver_tx[5] is the results of executing // getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results -func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) { +func (env *Environment) BlockResults(ctx context.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -193,7 +226,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (* // BlockSearch searches for a paginated set of blocks matching BeginBlock and // EndBlock event search criteria. 
func (env *Environment) BlockSearch( - ctx *rpctypes.Context, + ctx context.Context, query string, pagePtr, perPagePtr *int, orderBy string, @@ -215,7 +248,7 @@ func (env *Environment) BlockSearch( } } - results, err := kvsink.SearchBlockEvents(ctx.Context(), q) + results, err := kvsink.SearchBlockEvents(ctx, q) if err != nil { return nil, err } diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index 68237bc0b8..b1746acb7a 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -1,6 +1,7 @@ package core import ( + "context" "fmt" "testing" @@ -11,10 +12,9 @@ import ( abci "github.com/tendermint/tendermint/abci/types" sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) func TestBlockchainInfo(t *testing.T) { @@ -84,7 +84,10 @@ func TestBlockResults(t *testing.T) { env.StateStore = sm.NewStore(dbm.NewMemDB()) err := env.StateStore.SaveABCIResponses(100, results) require.NoError(t, err) - env.BlockStore = mockBlockStore{height: 100} + mockstore := &mocks.BlockStore{} + mockstore.On("Height").Return(int64(100)) + mockstore.On("Base").Return(int64(1)) + env.BlockStore = mockstore testCases := []struct { height int64 @@ -105,8 +108,9 @@ func TestBlockResults(t *testing.T) { }}, } + ctx := context.Background() for _, tc := range testCases { - res, err := env.BlockResults(&rpctypes.Context{}, &tc.height) + res, err := env.BlockResults(ctx, &tc.height) if tc.wantErr { assert.Error(t, err) } else { @@ -115,21 +119,3 @@ func TestBlockResults(t *testing.T) { } } } - -type mockBlockStore struct { - height int64 -} - -func (mockBlockStore) Base() int64 { return 1 } -func (store mockBlockStore) Height() int64 { return store.height } -func 
(store mockBlockStore) Size() int64 { return store.height } -func (mockBlockStore) LoadBaseMeta() *types.BlockMeta { return nil } -func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } -func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } -func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } -func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return nil } -func (mockBlockStore) LoadSeenCommit() *types.Commit { return nil } -func (mockBlockStore) PruneBlocks(height int64) (uint64, error) { return 0, nil } -func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { -} diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go index d17796fff1..6acdcc3337 100644 --- a/internal/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -1,9 +1,10 @@ package core import ( + "context" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Validators gets the validator set at the given block height. @@ -14,7 +15,7 @@ import ( // // More: https://docs.tendermint.com/master/rpc/#/Info/validators func (env *Environment) Validators( - ctx *rpctypes.Context, + ctx context.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) { @@ -50,7 +51,7 @@ func (env *Environment) Validators( // DumpConsensusState dumps consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { +func (env *Environment) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { // Get Peer consensus states. 
var peerStates []coretypes.PeerStateInfo @@ -91,7 +92,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.Re // ConsensusState returns a concise summary of the consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { +func (env *Environment) GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &coretypes.ResultConsensusState{RoundState: bz}, err @@ -100,9 +101,7 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.Res // ConsensusParams gets the consensus parameters at the given block height. // If no height is provided, it will fetch the latest consensus params. // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params -func (env *Environment) ConsensusParams( - ctx *rpctypes.Context, - heightPtr *int64) (*coretypes.ResultConsensusParams, error) { +func (env *Environment) ConsensusParams(ctx context.Context, heightPtr *int64) (*coretypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after the // last block. diff --git a/internal/rpc/core/dev.go b/internal/rpc/core/dev.go index 21c5154ff5..702413ab89 100644 --- a/internal/rpc/core/dev.go +++ b/internal/rpc/core/dev.go @@ -1,12 +1,13 @@ package core import ( + "context" + "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // UnsafeFlushMempool removes all transactions from the mempool. 
-func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*coretypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(ctx context.Context) (*coretypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() return &coretypes.ResultUnsafeFlushMempool{}, nil } diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index f05c34f146..448cd85a05 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -1,22 +1,31 @@ package core import ( + "context" "encoding/base64" + "encoding/json" "fmt" + "net" + "net/http" "time" + "github.com/rs/cors" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/rpc/coretypes" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" ) @@ -51,11 +60,6 @@ type transport interface { NodeInfo() types.NodeInfo } -type consensusReactor interface { - WaitSync() bool - GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) -} - type peerManager interface { Peers() []types.NodeID Addresses(types.NodeID) []p2p.NodeAddress @@ -74,7 +78,8 @@ type Environment struct { BlockStore sm.BlockStore EvidencePool sm.EvidencePool ConsensusState consensusState - ConsensusReactor consensusReactor + 
ConsensusReactor *consensus.Reactor + BlockSyncReactor *blocksync.Reactor // Legacy p2p stack P2PTransport transport @@ -86,9 +91,8 @@ type Environment struct { PubKey crypto.PubKey GenDoc *types.GenesisDoc // cache the genesis structure EventSinks []indexer.EventSink - EventBus *types.EventBus // thread safe + EventBus *eventbus.EventBus // thread safe Mempool mempool.Mempool - BlockSyncReactor consensus.BlockSyncReactor StateSyncMetricer statesync.Metricer Logger log.Logger @@ -150,7 +154,7 @@ func (env *Environment) InitGenesisChunks() error { return nil } - data, err := tmjson.Marshal(env.GenDoc) + data, err := json.Marshal(env.GenDoc) if err != nil { return err } @@ -198,9 +202,107 @@ func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, } func (env *Environment) latestUncommittedHeight() int64 { - nodeIsSyncing := env.ConsensusReactor.WaitSync() - if nodeIsSyncing { - return env.BlockStore.Height() + if env.ConsensusReactor != nil { + // consensus reactor can be nil in inspect mode. + + nodeIsSyncing := env.ConsensusReactor.WaitSync() + if nodeIsSyncing { + return env.BlockStore.Height() + } } return env.BlockStore.Height() + 1 } + +// StartService constructs and starts listeners for the RPC service +// according to the config object, returning an error if the service +// cannot be constructed or started. The listeners, which provide +// access to the service, run until the context is canceled. 
+func (env *Environment) StartService(ctx context.Context, conf *config.Config) ([]net.Listener, error) { + if err := env.InitGenesisChunks(); err != nil { + return nil, err + } + + listenAddrs := strings.SplitAndTrimEmpty(conf.RPC.ListenAddress, ",", " ") + routes := NewRoutesMap(env, &RouteOptions{ + Unsafe: conf.RPC.Unsafe, + }) + + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = conf.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = conf.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = conf.RPC.MaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + // we may expose the rpc over both a unix and tcp socket + listeners := make([]net.Listener, len(listenAddrs)) + for i, listenAddr := range listenAddrs { + mux := http.NewServeMux() + rpcLogger := env.Logger.With("module", "rpc-server") + wmLogger := rpcLogger.With("protocol", "websocket") + wm := rpcserver.NewWebsocketManager(wmLogger, routes, + rpcserver.OnDisconnect(func(remoteAddr string) { + err := env.EventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != tmpubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + }), + rpcserver.ReadLimit(cfg.MaxBodyBytes), + ) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) + listener, err := rpcserver.Listen( + listenAddr, + cfg.MaxOpenConnections, + ) + if err != nil { + return nil, err + } + + var rootHandler http.Handler = mux + if conf.RPC.IsCorsEnabled() { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: conf.RPC.CORSAllowedOrigins, + AllowedMethods: conf.RPC.CORSAllowedMethods, + AllowedHeaders: conf.RPC.CORSAllowedHeaders, + }) + rootHandler = 
corsMiddleware.Handler(mux) + } + if conf.RPC.IsTLSEnabled() { + go func() { + if err := rpcserver.ServeTLS( + ctx, + listener, + rootHandler, + conf.RPC.CertFile(), + conf.RPC.KeyFile(), + rpcLogger, + cfg, + ); err != nil { + env.Logger.Error("error serving server with TLS", "err", err) + } + }() + } else { + go func() { + if err := rpcserver.Serve( + ctx, + listener, + rootHandler, + rpcLogger, + cfg, + ); err != nil { + env.Logger.Error("error serving server", "err", err) + } + }() + } + + listeners[i] = listener + } + + return listeners, nil + +} diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go index 8632e00c1e..054183817d 100644 --- a/internal/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -2,11 +2,12 @@ package core import ( "context" + "errors" "fmt" "time" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -14,17 +15,24 @@ import ( const ( // Buffer on the Tendermint (server) side to allow some slowness in clients. subBufferSize = 100 + + // maxQueryLength is the maximum length of a query string that will be + // accepted. This is just a safety check to avoid outlandish queries. + maxQueryLength = 512 ) // Subscribe for events via WebSocket. 
// More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe -func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { - addr := ctx.RemoteAddr() +func (env *Environment) Subscribe(ctx context.Context, query string) (*coretypes.ResultSubscribe, error) { + callInfo := rpctypes.GetCallInfo(ctx) + addr := callInfo.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) } else if env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient { return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) + } else if len(query) > maxQueryLength { + return nil, errors.New("maximum query length exceeded") } env.Logger.Info("Subscribe to query", "remote", addr, "query", query) @@ -34,49 +42,53 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretyp return nil, fmt.Errorf("failed to parse query: %w", err) } - subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) + subCtx, cancel := context.WithTimeout(ctx, SubscribeTimeout) defer cancel() - sub, err := env.EventBus.Subscribe(subCtx, addr, q, subBufferSize) + sub, err := env.EventBus.SubscribeWithArgs(subCtx, tmpubsub.SubscribeArgs{ + ClientID: addr, + Query: q, + Limit: subBufferSize, + }) if err != nil { return nil, err } // Capture the current ID, since it can change in the future. 
- subscriptionID := ctx.JSONReq.ID + subscriptionID := callInfo.RPCRequest.ID go func() { + opctx, opcancel := context.WithCancel(context.Background()) + defer opcancel() + for { - select { - case msg := <-sub.Out(): - var ( - resultEvent = &coretypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} - resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) - ) - writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { - env.Logger.Info("Can't write response (slow client)", + msg, err := sub.Next(opctx) + if errors.Is(err, tmpubsub.ErrUnsubscribed) { + // The subscription was removed by the client. + return + } else if errors.Is(err, tmpubsub.ErrTerminated) { + // The subscription was terminated by the publisher. + resp := callInfo.RPCRequest.MakeError(err) + ok := callInfo.WSConn.TryWriteRPCResponse(opctx, resp) + if !ok { + env.Logger.Info("Unable to write response (slow client)", "to", addr, "subscriptionID", subscriptionID, "err", err) } - case <-sub.Canceled(): - if sub.Err() != tmpubsub.ErrUnsubscribed { - var reason string - if sub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = sub.Err().Error() - } - var ( - err = fmt.Errorf("subscription was canceled (reason: %s)", reason) - resp = rpctypes.RPCServerError(subscriptionID, err) - ) - if ok := ctx.WSConn.TryWriteRPCResponse(resp); !ok { - env.Logger.Info("Can't write response (slow client)", - "to", addr, "subscriptionID", subscriptionID, "err", err) - } - } return } + + // We have a message to deliver to the client. 
+ resp := callInfo.RPCRequest.MakeResponse(&coretypes.ResultEvent{ + Query: query, + Data: msg.Data(), + Events: msg.Events(), + }) + wctx, cancel := context.WithTimeout(opctx, 10*time.Second) + err = callInfo.WSConn.WriteRPCResponse(wctx, resp) + cancel() + if err != nil { + env.Logger.Info("Unable to write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } } }() @@ -85,8 +97,8 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretyp // Unsubscribe from events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe -func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { - args := tmpubsub.UnsubscribeArgs{Subscriber: ctx.RemoteAddr()} +func (env *Environment) Unsubscribe(ctx context.Context, query string) (*coretypes.ResultUnsubscribe, error) { + args := tmpubsub.UnsubscribeArgs{Subscriber: rpctypes.GetCallInfo(ctx).RemoteAddr()} env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", query) var err error @@ -96,7 +108,7 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coret args.ID = query } - err = env.EventBus.Unsubscribe(ctx.Context(), args) + err = env.EventBus.Unsubscribe(ctx, args) if err != nil { return nil, err } @@ -105,10 +117,10 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coret // UnsubscribeAll from all events via WebSocket. 
// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all -func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { - addr := ctx.RemoteAddr() +func (env *Environment) UnsubscribeAll(ctx context.Context) (*coretypes.ResultUnsubscribe, error) { + addr := rpctypes.GetCallInfo(ctx).RemoteAddr() env.Logger.Info("Unsubscribe from all", "remote", addr) - err := env.EventBus.UnsubscribeAll(ctx.Context(), addr) + err := env.EventBus.UnsubscribeAll(ctx, addr) if err != nil { return nil, err } diff --git a/internal/rpc/core/evidence.go b/internal/rpc/core/evidence.go index a7641b99d4..e97024b4c7 100644 --- a/internal/rpc/core/evidence.go +++ b/internal/rpc/core/evidence.go @@ -1,29 +1,26 @@ package core import ( + "context" "fmt" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // BroadcastEvidence broadcasts evidence of the misbehavior. 
// More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence func (env *Environment) BroadcastEvidence( - ctx *rpctypes.Context, - ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - - if ev == nil { + ctx context.Context, + ev coretypes.Evidence, +) (*coretypes.ResultBroadcastEvidence, error) { + if ev.Value == nil { return nil, fmt.Errorf("%w: no evidence was provided", coretypes.ErrInvalidRequest) } - - if err := ev.ValidateBasic(); err != nil { + if err := ev.Value.ValidateBasic(); err != nil { return nil, fmt.Errorf("evidence.ValidateBasic failed: %w", err) } - - if err := env.EvidencePool.AddEvidence(ev); err != nil { + if err := env.EvidencePool.AddEvidence(ev.Value); err != nil { return nil, fmt.Errorf("failed to add evidence: %w", err) } - return &coretypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil + return &coretypes.ResultBroadcastEvidence{Hash: ev.Value.Hash()}, nil } diff --git a/internal/rpc/core/health.go b/internal/rpc/core/health.go index fc355c7e70..c55aa58dca 100644 --- a/internal/rpc/core/health.go +++ b/internal/rpc/core/health.go @@ -1,13 +1,14 @@ package core import ( + "context" + "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. 
// More: https://docs.tendermint.com/master/rpc/#/Info/health -func (env *Environment) Health(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { +func (env *Environment) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return &coretypes.ResultHealth{}, nil } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 5b12a6c2bc..325d293cb9 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -1,6 +1,7 @@ package core import ( + "context" "errors" "fmt" "math/rand" @@ -9,8 +10,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/state/indexer" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -20,8 +21,8 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor DeliverTx results. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async -func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempool.TxInfo{}) +func (env *Environment) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + err := env.Mempool.CheckTx(ctx, tx, nil, mempool.TxInfo{}) if err != nil { return nil, err } @@ -32,10 +33,10 @@ func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*c // BroadcastTxSync returns with the response from CheckTx. Does not wait for // DeliverTx result. 
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync -func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { +func (env *Environment) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := env.Mempool.CheckTx( - ctx.Context(), + ctx, tx, func(res *abci.Response) { resCh <- res }, mempool.TxInfo{}, @@ -59,10 +60,10 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*co // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit -func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { //nolint:lll +func (env *Environment) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { resCh := make(chan *abci.Response, 1) err := env.Mempool.CheckTx( - ctx.Context(), + ctx, tx, func(res *abci.Response) { resCh <- res }, mempool.TxInfo{}, @@ -72,6 +73,12 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* } r := (<-resCh).GetCheckTx() + if r.Code != abci.CodeTypeOK { + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: tx.Hash(), + }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError) + } if !indexer.KVSinkEnabled(env.EventSinks) { return &coretypes.ResultBroadcastTxCommit{ @@ -89,8 +96,8 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* for { count++ select { - case <-ctx.Context().Done(): - env.Logger.Error("Error on broadcastTxCommit", + case <-ctx.Done(): + env.Logger.Error("error on broadcastTxCommit", "duration", time.Since(startAt), "err", err) return &coretypes.ResultBroadcastTxCommit{ @@ -117,24 +124,31 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* } } -// 
UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) -// including their number. +// UnconfirmedTxs gets unconfirmed transactions from the mempool in order of priority // More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*coretypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator - limit := env.validatePerPage(limitPtr) +func (env *Environment) UnconfirmedTxs(ctx context.Context, pagePtr, perPagePtr *int) (*coretypes.ResultUnconfirmedTxs, error) { + totalCount := env.Mempool.Size() + perPage := env.validatePerPage(perPagePtr) + page, err := validatePage(pagePtr, perPage, totalCount) + if err != nil { + return nil, err + } + + skipCount := validateSkipCount(page, perPage) + + txs := env.Mempool.ReapMaxTxs(skipCount + tmmath.MinInt(perPage, totalCount-skipCount)) + result := txs[skipCount:] - txs := env.Mempool.ReapMaxTxs(limit) return &coretypes.ResultUnconfirmedTxs{ - Count: len(txs), - Total: env.Mempool.Size(), + Count: len(result), + Total: totalCount, TotalBytes: env.Mempool.SizeBytes(), - Txs: txs}, nil + Txs: result}, nil } // NumUnconfirmedTxs gets number of unconfirmed transactions. // More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { +func (env *Environment) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return &coretypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), @@ -144,14 +158,14 @@ func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.Res // CheckTx checks the transaction without executing it. The transaction won't // be added to the mempool either. 
// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx -func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) +func (env *Environment) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { + res, err := env.ProxyAppMempool.CheckTx(ctx, abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err } return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil } -func (env *Environment) RemoveTx(ctx *rpctypes.Context, txkey types.TxKey) error { +func (env *Environment) RemoveTx(ctx context.Context, txkey types.TxKey) error { return env.Mempool.RemoveTxByKey(txkey) } diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go index fdf4be69b8..3cead393ca 100644 --- a/internal/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -1,16 +1,16 @@ package core import ( + "context" "errors" "fmt" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // NetInfo returns network info. // More: https://docs.tendermint.com/master/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { +func (env *Environment) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { peerList := env.PeerManager.Peers() peers := make([]coretypes.Peer, 0, len(peerList)) @@ -36,7 +36,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo // Genesis returns genesis file. 
// More: https://docs.tendermint.com/master/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { +func (env *Environment) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { if len(env.genChunks) > 1 { return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } @@ -44,7 +44,7 @@ func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil } -func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { +func (env *Environment) GenesisChunked(ctx context.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { if env.genChunks == nil { return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") } diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index fe99d2118e..84e8e199ac 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -1,60 +1,120 @@ package core import ( + "context" + + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/rpc/coretypes" rpc "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/types" ) // TODO: better system than "unsafe" prefix type RoutesMap map[string]*rpc.RPCFunc -// Routes is a map of available routes. -func (env *Environment) GetRoutes() RoutesMap { - return RoutesMap{ +// RouteOptions provide optional settings to NewRoutesMap. A nil *RouteOptions +// is ready for use and provides defaults as specified. +type RouteOptions struct { + Unsafe bool // include "unsafe" methods (default false) +} + +// NewRoutesMap constructs an RPC routing map for the given service +// implementation. If svc implements RPCUnsafe and opts.Unsafe is true, the +// "unsafe" methods will also be added to the map. 
The caller may also edit the +// map after construction; each call to NewRoutesMap returns a fresh map. +func NewRoutesMap(svc RPCService, opts *RouteOptions) RoutesMap { + if opts == nil { + opts = new(RouteOptions) + } + out := RoutesMap{ // subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpc.NewWSRPCFunc(env.Subscribe, "query"), - "unsubscribe": rpc.NewWSRPCFunc(env.Unsubscribe, "query"), - "unsubscribe_all": rpc.NewWSRPCFunc(env.UnsubscribeAll, ""), + "subscribe": rpc.NewWSRPCFunc(svc.Subscribe, "query"), + "unsubscribe": rpc.NewWSRPCFunc(svc.Unsubscribe, "query"), + "unsubscribe_all": rpc.NewWSRPCFunc(svc.UnsubscribeAll), // info API - "health": rpc.NewRPCFunc(env.Health, "", false), - "status": rpc.NewRPCFunc(env.Status, "", false), - "net_info": rpc.NewRPCFunc(env.NetInfo, "", false), - "blockchain": rpc.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true), - "genesis": rpc.NewRPCFunc(env.Genesis, "", true), - "genesis_chunked": rpc.NewRPCFunc(env.GenesisChunked, "chunk", true), - "block": rpc.NewRPCFunc(env.Block, "height", true), - "block_by_hash": rpc.NewRPCFunc(env.BlockByHash, "hash", true), - "block_results": rpc.NewRPCFunc(env.BlockResults, "height", true), - "commit": rpc.NewRPCFunc(env.Commit, "height", true), - "check_tx": rpc.NewRPCFunc(env.CheckTx, "tx", true), - "remove_tx": rpc.NewRPCFunc(env.RemoveTx, "txkey", false), - "tx": rpc.NewRPCFunc(env.Tx, "hash,prove", true), - "tx_search": rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), - "block_search": rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), - "validators": rpc.NewRPCFunc(env.Validators, "height,page,per_page", true), - "dump_consensus_state": rpc.NewRPCFunc(env.DumpConsensusState, "", false), - "consensus_state": rpc.NewRPCFunc(env.GetConsensusState, "", false), - "consensus_params": rpc.NewRPCFunc(env.ConsensusParams, "height", true), - "unconfirmed_txs": rpc.NewRPCFunc(env.UnconfirmedTxs, "limit", false), 
- "num_unconfirmed_txs": rpc.NewRPCFunc(env.NumUnconfirmedTxs, "", false), + "health": rpc.NewRPCFunc(svc.Health), + "status": rpc.NewRPCFunc(svc.Status), + "net_info": rpc.NewRPCFunc(svc.NetInfo), + "blockchain": rpc.NewRPCFunc(svc.BlockchainInfo, "minHeight", "maxHeight"), + "genesis": rpc.NewRPCFunc(svc.Genesis), + "genesis_chunked": rpc.NewRPCFunc(svc.GenesisChunked, "chunk"), + "header": rpc.NewRPCFunc(svc.Header, "height"), + "header_by_hash": rpc.NewRPCFunc(svc.HeaderByHash, "hash"), + "block": rpc.NewRPCFunc(svc.Block, "height"), + "block_by_hash": rpc.NewRPCFunc(svc.BlockByHash, "hash"), + "block_results": rpc.NewRPCFunc(svc.BlockResults, "height"), + "commit": rpc.NewRPCFunc(svc.Commit, "height"), + "check_tx": rpc.NewRPCFunc(svc.CheckTx, "tx"), + "remove_tx": rpc.NewRPCFunc(svc.RemoveTx, "txkey"), + "tx": rpc.NewRPCFunc(svc.Tx, "hash", "prove"), + "tx_search": rpc.NewRPCFunc(svc.TxSearch, "query", "prove", "page", "per_page", "order_by"), + "block_search": rpc.NewRPCFunc(svc.BlockSearch, "query", "page", "per_page", "order_by"), + "validators": rpc.NewRPCFunc(svc.Validators, "height", "page", "per_page"), + "dump_consensus_state": rpc.NewRPCFunc(svc.DumpConsensusState), + "consensus_state": rpc.NewRPCFunc(svc.GetConsensusState), + "consensus_params": rpc.NewRPCFunc(svc.ConsensusParams, "height"), + "unconfirmed_txs": rpc.NewRPCFunc(svc.UnconfirmedTxs, "page", "per_page"), + "num_unconfirmed_txs": rpc.NewRPCFunc(svc.NumUnconfirmedTxs), // tx broadcast API - "broadcast_tx_commit": rpc.NewRPCFunc(env.BroadcastTxCommit, "tx", false), - "broadcast_tx_sync": rpc.NewRPCFunc(env.BroadcastTxSync, "tx", false), - "broadcast_tx_async": rpc.NewRPCFunc(env.BroadcastTxAsync, "tx", false), + "broadcast_tx_commit": rpc.NewRPCFunc(svc.BroadcastTxCommit, "tx"), + "broadcast_tx_sync": rpc.NewRPCFunc(svc.BroadcastTxSync, "tx"), + "broadcast_tx_async": rpc.NewRPCFunc(svc.BroadcastTxAsync, "tx"), // abci API - "abci_query": rpc.NewRPCFunc(env.ABCIQuery, 
"path,data,height,prove", false), - "abci_info": rpc.NewRPCFunc(env.ABCIInfo, "", true), + "abci_query": rpc.NewRPCFunc(svc.ABCIQuery, "path", "data", "height", "prove"), + "abci_info": rpc.NewRPCFunc(svc.ABCIInfo), // evidence API - "broadcast_evidence": rpc.NewRPCFunc(env.BroadcastEvidence, "evidence", false), + "broadcast_evidence": rpc.NewRPCFunc(svc.BroadcastEvidence, "evidence"), } + if u, ok := svc.(RPCUnsafe); ok && opts.Unsafe { + out["unsafe_flush_mempool"] = rpc.NewRPCFunc(u.UnsafeFlushMempool) + } + return out +} + +// RPCService defines the set of methods exported by the RPC service +// implementation, for use in constructing a routing table. +type RPCService interface { + ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) + ABCIQuery(ctx context.Context, path string, data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error) + Block(ctx context.Context, heightPtr *int64) (*coretypes.ResultBlock, error) + BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) + BlockResults(ctx context.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) + BlockSearch(ctx context.Context, query string, pagePtr, perPagePtr *int, orderBy string) (*coretypes.ResultBlockSearch, error) + BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) + BroadcastEvidence(ctx context.Context, ev coretypes.Evidence) (*coretypes.ResultBroadcastEvidence, error) + BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) + BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) + BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) + CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) + Commit(ctx context.Context, heightPtr *int64) (*coretypes.ResultCommit, error) + ConsensusParams(ctx context.Context, heightPtr *int64) 
(*coretypes.ResultConsensusParams, error) + DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) + Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) + GenesisChunked(ctx context.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) + GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) + Header(ctx context.Context, heightPtr *int64) (*coretypes.ResultHeader, error) + HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) + Health(ctx context.Context) (*coretypes.ResultHealth, error) + NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) + NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) + RemoveTx(ctx context.Context, txkey types.TxKey) error + Status(ctx context.Context) (*coretypes.ResultStatus, error) + Subscribe(ctx context.Context, query string) (*coretypes.ResultSubscribe, error) + Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) + TxSearch(ctx context.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) (*coretypes.ResultTxSearch, error) + UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) + Unsubscribe(ctx context.Context, query string) (*coretypes.ResultUnsubscribe, error) + UnsubscribeAll(ctx context.Context) (*coretypes.ResultUnsubscribe, error) + Validators(ctx context.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) } -// AddUnsafeRoutes adds unsafe routes. -func (env *Environment) AddUnsafe(routes RoutesMap) { - // control API - routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false) +// RPCUnsafe defines the set of "unsafe" methods that may optionally be +// exported by the RPC service. 
+type RPCUnsafe interface { + UnsafeFlushMempool(ctx context.Context) (*coretypes.ResultUnsafeFlushMempool, error) } diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go index b883c6dc21..2f648978aa 100644 --- a/internal/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -2,18 +2,19 @@ package core import ( "bytes" + "context" + "fmt" "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // Status returns Tendermint status including node info, pubkey, latest block // hash, app hash, block height, current max peer block height, and time. // More: https://docs.tendermint.com/master/rpc/#/Info/status -func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { +func (env *Environment) Status(ctx context.Context) (*coretypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -59,8 +60,14 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, } } + var applicationInfo coretypes.ApplicationInfo + if abciInfo, err := env.ABCIInfo(ctx); err == nil { + applicationInfo.Version = fmt.Sprint(abciInfo.Response.AppVersion) + } + result := &coretypes.ResultStatus{ - NodeInfo: env.P2PTransport.NodeInfo(), + NodeInfo: env.P2PTransport.NodeInfo(), + ApplicationInfo: applicationInfo, SyncInfo: coretypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 60c7519c04..126875d0de 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -1,16 +1,16 @@ package core import ( + "context" "errors" "fmt" "sort" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/bytes" tmmath 
"github.com/tendermint/tendermint/libs/math" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -18,7 +18,7 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. // More: https://docs.tendermint.com/master/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { +func (env *Environment) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { // if index is disabled, return error // N.B. The hash parameter is HexBytes so that the reflective parameter @@ -63,7 +63,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo // list of transactions (maximum ?per_page entries) and the total count. // More: https://docs.tendermint.com/master/rpc/#/Info/tx_search func (env *Environment) TxSearch( - ctx *rpctypes.Context, + ctx context.Context, query string, prove bool, pagePtr, perPagePtr *int, @@ -72,6 +72,8 @@ func (env *Environment) TxSearch( if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink") + } else if len(query) > maxQueryLength { + return nil, errors.New("maximum query length exceeded") } q, err := tmquery.New(query) @@ -81,7 +83,7 @@ func (env *Environment) TxSearch( for _, sink := range env.EventSinks { if sink.Type() == indexer.KV { - results, err := sink.SearchTxEvents(ctx.Context(), q) + results, err := sink.SearchTxEvents(ctx, q) if err != nil { return nil, err } diff --git a/internal/state/execution.go b/internal/state/execution.go index 52a8933c02..30720e31dd 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -8,7 +8,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" 
"github.com/tendermint/tendermint/crypto/encoding" - "github.com/tendermint/tendermint/internal/libs/fail" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" @@ -69,7 +69,7 @@ func NewBlockExecutor( res := &BlockExecutor{ store: stateStore, proxyApp: proxyApp, - eventBus: types.NopEventBus{}, + eventBus: eventbus.NopEventBus{}, mempool: pool, evpool: evpool, logger: logger, @@ -99,11 +99,14 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // and txs from the mempool. The max bytes must be big enough to fit the commit. // Up to 1/10th of the block space is allcoated for maximum sized evidence. // The rest is given to txs, up to the max gas. +// +// Contract: application will not return more bytes than are sent over the wire. func (blockExec *BlockExecutor) CreateProposalBlock( height int64, state State, commit *types.Commit, proposerAddr []byte, -) (*types.Block, *types.PartSet) { + votes []*types.Vote, +) (*types.Block, *types.PartSet, error) { maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas @@ -113,53 +116,50 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // Fetch a limited amount of valid txs maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) - // TODO(ismail): reaping the mempool has to happen in relation to a max - // allowed square size instead of (only) Gas / bytes - // maybe the mempool actually should track things separately - // meaning that CheckTx should already do the mapping: - // Tx -> Txs, Message - // https://github.com/tendermint/tendermint/issues/77 txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) - l := len(txs) - bzs := make([][]byte, l) - for i := 0; i < l; i++ { - bzs[i] = txs[i] - } - - // TODO(ismail): - // 1. 
get those intermediate state roots & messages either from the - // mempool or from the abci-app - // 1.1 at this point we should now the square / block size: - // https://github.com/celestiaorg/celestia-specs/blob/53e5f350838f1e0785ad670704bf91dac2f4f5a3/specs/block_proposer.md#deciding-on-a-block-size - // Here, we instead assume a fixed (max) square size instead. - // 2. feed them into MakeBlock below: - processedBlockTxs, err := blockExec.proxyApp.PreprocessTxsSync(context.TODO(), abci.RequestPreprocessTxs{Txs: bzs}) + + preparedProposal, err := blockExec.proxyApp.PrepareProposal( + context.Background(), + abci.RequestPrepareProposal{ + BlockData: txs.ToSliceOfBytes(), + BlockDataSize: maxDataBytes, + }, + ) if err != nil { - // The App MUST ensure that only valid (and hence 'processable') - // Tx enter the mempool. Hence, at this point, we can't have any non-processable - // transaction causing an error. Also, the App can simply skip any Tx that could cause any - // kind of trouble. + // The App MUST ensure that only valid (and hence 'processable') transactions + // enter the mempool. Hence, at this point, we can't have any non-processable + // transaction causing an error. + // + // Also, the App can simply skip any transaction that could cause any kind of trouble. // Either way, we can not recover in a meaningful way, unless we skip proposing - // this block, repair what caused the error and try again. - // Hence we panic on purpose for now. + // this block, repair what caused the error and try again. Hence, we panic on + // purpose for now. 
panic(err) } + newTxs := preparedProposal.GetBlockData() + var txSize int + for _, tx := range newTxs { + txSize += len(tx) - ppt := processedBlockTxs.GetTxs() - - pbmessages := processedBlockTxs.GetMessages() - - lp := len(ppt) - processedTxs := make(types.Txs, lp) - if lp > 0 { - for i := 0; i < l; i++ { - processedTxs[i] = ppt[i] + if maxDataBytes < int64(txSize) { + panic("block data exceeds max amount of allowed bytes") } } - messages := types.MessagesFromProto(pbmessages) + modifiedData, err := types.DataFromPreparedProposal(preparedProposal.GetBlockData()) + if err != nil { + return nil, nil, err + } - return state.MakeBlock(height, processedTxs, evidence, nil, messages.MessagesList, commit, proposerAddr) + return state.MakeBlock( + height, + modifiedData.Txs, + evidence, + modifiedData.IntermediateStateRoots.RawRootsList, + modifiedData.Messages.MessagesList, + commit, + proposerAddr, + ) } // ValidateBlock validates the given block against the given state. @@ -193,7 +193,10 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. func (blockExec *BlockExecutor) ApplyBlock( - state State, blockID types.BlockID, block *types.Block, + ctx context.Context, + state State, + blockID types.BlockID, + block *types.Block, ) (State, error) { // validate the block if we haven't already @@ -202,7 +205,7 @@ func (blockExec *BlockExecutor) ApplyBlock( } startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp( + abciResponses, err := execBlockOnProxyApp(ctx, blockExec.logger, blockExec.proxyApp, block, blockExec.store, state.InitialHeight, ) endTime := time.Now().UnixNano() @@ -211,20 +214,16 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, ErrProxyAppConn(err) } - fail.Fail() // XXX - // Save the results before we commit. 
if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { return state, err } - fail.Fail() // XXX - // validate the validator updates and convert to tendermint types abciValUpdates := abciResponses.EndBlock.ValidatorUpdates err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) if err != nil { - return state, fmt.Errorf("error in validator updates: %v", err) + return state, fmt.Errorf("error in validator updates: %w", err) } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) @@ -238,28 +237,24 @@ func (blockExec *BlockExecutor) ApplyBlock( // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %w", err) } // Lock mempool, commit app state, update mempoool. - appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) + appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.DeliverTxs) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %w", err) } // Update evpool with the latest state. blockExec.evpool.Update(state, block.Evidence.Evidence) - fail.Fail() // XXX - // Update the app hash and save the state. state.AppHash = appHash if err := blockExec.store.Save(state); err != nil { return state, err } - fail.Fail() // XXX - // Prune old heights, if requested by ABCI app. if retainHeight > 0 { pruned, err := blockExec.pruneBlocks(retainHeight) @@ -275,11 +270,42 @@ func (blockExec *BlockExecutor) ApplyBlock( // Events are fired after everything else. 
// NOTE: if we crash between Commit and Save, events wont be fired during replay - fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, abciResponses, validatorUpdates) + fireEvents(ctx, blockExec.logger, blockExec.eventBus, block, blockID, abciResponses, validatorUpdates) return state, nil } +func (blockExec *BlockExecutor) ExtendVote(vote *types.Vote) (types.VoteExtension, error) { + ctx := context.Background() + req := abci.RequestExtendVote{ + Vote: vote.ToProto(), + } + + resp, err := blockExec.proxyApp.ExtendVote(ctx, req) + if err != nil { + return types.VoteExtension{}, err + } + return types.VoteExtensionFromProto(resp.VoteExtension), nil +} + +func (blockExec *BlockExecutor) VerifyVoteExtension(vote *types.Vote) error { + ctx := context.Background() + req := abci.RequestVerifyVoteExtension{ + Vote: vote.ToProto(), + } + + resp, err := blockExec.proxyApp.VerifyVoteExtension(ctx, req) + if err != nil { + return err + } + + if resp.IsErr() { + return types.ErrVoteInvalidExtension + } + + return nil +} + // Commit locks the mempool, runs the ABCI Commit message, and updates the // mempool. // It returns the result of calling abci.Commit (the AppHash) and the height to retain (if any). @@ -287,6 +313,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // typically reset on Commit and old txs must be replayed against committed // state before new txs are run in the mempool, lest they be invalid. func (blockExec *BlockExecutor) Commit( + ctx context.Context, state State, block *types.Block, deliverTxResponses []*abci.ResponseDeliverTx, @@ -296,16 +323,16 @@ func (blockExec *BlockExecutor) Commit( // while mempool is Locked, flush to ensure all async requests have completed // in the ABCI app before Commit. 
- err := blockExec.mempool.FlushAppConn() + err := blockExec.mempool.FlushAppConn(ctx) if err != nil { blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err) return nil, 0, err } // Commit block, get hash back - res, err := blockExec.proxyApp.CommitSync(context.Background()) + res, err := blockExec.proxyApp.Commit(ctx) if err != nil { - blockExec.logger.Error("client error during proxyAppConn.CommitSync", "err", err) + blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err) return nil, 0, err } @@ -319,6 +346,7 @@ func (blockExec *BlockExecutor) Commit( // Update mempool. err = blockExec.mempool.Update( + ctx, block.Height, block.Txs, deliverTxResponses, @@ -335,6 +363,7 @@ func (blockExec *BlockExecutor) Commit( // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set func execBlockOnProxyApp( + ctx context.Context, logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block, @@ -348,26 +377,6 @@ func execBlockOnProxyApp( dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) abciResponses.DeliverTxs = dtxs - // Execute transactions and get hash. - proxyCb := func(req *abci.Request, res *abci.Response) { - if r, ok := res.Value.(*abci.Response_DeliverTx); ok { - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - txRes := r.DeliverTx - if txRes.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("invalid tx", "code", txRes.Code, "log", txRes.Log) - invalidTxs++ - } - - abciResponses.DeliverTxs[txIndex] = txRes - txIndex++ - } - } - proxyAppConn.SetResponseCallback(proxyCb) - commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight) byzVals := make([]abci.Evidence, 0) @@ -375,8 +384,6 @@ func execBlockOnProxyApp( byzVals = append(byzVals, evidence.ABCI()...) 
} - ctx := context.Background() - // Begin block var err error pbh := block.Header.ToProto() @@ -384,7 +391,7 @@ func execBlockOnProxyApp( return nil, errors.New("nil header") } - abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync( + abciResponses.BeginBlock, err = proxyAppConn.BeginBlock( ctx, abci.RequestBeginBlock{ Hash: block.Hash(), @@ -400,13 +407,22 @@ func execBlockOnProxyApp( // run txs of block for _, tx := range block.Txs { - _, err = proxyAppConn.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: tx}) + resp, err := proxyAppConn.DeliverTx(ctx, abci.RequestDeliverTx{Tx: tx}) if err != nil { return nil, err } + if resp.Code == abci.CodeTypeOK { + validTxs++ + } else { + logger.Debug("invalid tx", "code", resp.Code, "log", resp.Log) + invalidTxs++ + } + + abciResponses.DeliverTxs[txIndex] = resp + txIndex++ } - abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(ctx, abci.RequestEndBlock{Height: block.Height}) + abciResponses.EndBlock, err = proxyAppConn.EndBlock(ctx, abci.RequestEndBlock{Height: block.Height}) if err != nil { logger.Error("error in proxyAppConn.EndBlock", "err", err) return nil, err @@ -499,7 +515,7 @@ func updateState( if len(validatorUpdates) > 0 { err := nValSet.UpdateWithChangeSet(validatorUpdates) if err != nil { - return state, fmt.Errorf("error changing validator set: %v", err) + return state, fmt.Errorf("error changing validator set: %w", err) } // Change results from this height but only applies to the next next height. 
lastHeightValsChanged = header.Height + 1 + 1 @@ -516,7 +532,7 @@ nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.EndBlock.ConsensusParamUpdates) err := nextParams.ValidateConsensusParams() if err != nil { - return state, fmt.Errorf("error updating consensus params: %v", err) + return state, fmt.Errorf("error updating consensus params: %w", err) } state.Version.Consensus.App = nextParams.Version.AppVersion @@ -527,7 +543,7 @@ nextVersion := state.Version - // NOTE: the AppHash has not been populated. + // NOTE: the AppHash and the VoteExtension have not been populated. // It will be filled on state.Save. return State{ Version: nextVersion, @@ -551,6 +567,7 @@ // Fire TxEvent for every tx. // NOTE: if Tendermint crashes before commit, some or all of these events may be published again. func fireEvents( + ctx context.Context, logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, @@ -558,7 +575,7 @@ abciResponses *tmstate.ABCIResponses, validatorUpdates []*types.Validator, ) { - if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + if err := eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{ Block: block, BlockID: blockID, ResultBeginBlock: *abciResponses.BeginBlock, @@ -567,7 +584,7 @@ logger.Error("failed publishing new block", "err", err) } - if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + if err := eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{ Header: block.Header, NumTxs: int64(len(block.Txs)), ResultBeginBlock: *abciResponses.BeginBlock, @@ -578,7 +595,7 @@ if len(block.Evidence.Evidence) != 0 { for _, ev := range block.Evidence.Evidence { - if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ + if err := eventBus.PublishEventNewEvidence(ctx, types.EventDataNewEvidence{ Evidence: ev, Height: block.Height, }); err != nil { 
@@ -588,7 +605,7 @@ func fireEvents( } for i, tx := range block.Data.Txs { - if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ + if err := eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: abci.TxResult{ Height: block.Height, Index: uint32(i), Tx: tx, @@ -599,7 +616,7 @@ func fireEvents( } if len(validatorUpdates) > 0 { - if err := eventBus.PublishEventValidatorSetUpdates( + if err := eventBus.PublishEventValidatorSetUpdates(ctx, types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil { logger.Error("failed publishing event", "err", err) } @@ -612,6 +629,7 @@ func fireEvents( // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). func ExecCommitBlock( + ctx context.Context, be *BlockExecutor, appConnConsensus proxy.AppConnConsensus, block *types.Block, @@ -620,7 +638,7 @@ func ExecCommitBlock( initialHeight int64, s State, ) ([]byte, error) { - abciResponses, err := execBlockOnProxyApp(logger, appConnConsensus, block, store, initialHeight) + abciResponses, err := execBlockOnProxyApp(ctx, logger, appConnConsensus, block, store, initialHeight) if err != nil { logger.Error("failed executing block on proxy app", "height", block.Height, "err", err) return nil, err @@ -640,14 +658,19 @@ func ExecCommitBlock( return nil, err } - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()} - fireEvents(be.logger, be.eventBus, block, blockID, abciResponses, validatorUpdates) + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + return nil, err + } + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + fireEvents(ctx, be.logger, be.eventBus, block, blockID, abciResponses, validatorUpdates) } // Commit block, get hash back - res, err := appConnConsensus.CommitSync(context.Background()) + res, err := 
appConnConsensus.Commit(ctx) if err != nil { - logger.Error("client error during proxyAppConn.CommitSync", "err", res) + logger.Error("client error during proxyAppConn.Commit", "err", res) return nil, err } diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 313f71016e..96b92d9bd4 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -16,13 +16,14 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" mmock "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" - testfactory "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" @@ -37,22 +38,29 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) - state, stateDB, _ := makeState(1, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), 
proxyApp.Consensus(), + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - block := sf.MakeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := sf.MakeBlock(state, 1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - state, err = blockExec.ApplyBlock(state, blockID, block) - require.Nil(t, err) + state, err = blockExec.ApplyBlock(ctx, state, blockID, block) + require.NoError(t, err) // TODO check state and mempool assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") @@ -60,14 +68,17 @@ func TestApplyBlock(t *testing.T) { // TestBeginBlockValidators ensures we send absent validators list. func TestBeginBlockValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // no need to check error again + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) - state, stateDB, _ := makeState(2, 2) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, _ := makeState(t, 2, 2) stateStore := sm.NewStore(stateDB) prevHash := state.LastBlockID.Hash @@ -79,11 +90,15 @@ func TestBeginBlockValidators(t *testing.T) { commitSig0 = types.NewCommitSigForBlock( []byte("Signature1"), state.Validators.Validators[0].Address, - now) + now, + types.VoteExtensionToSign{}, + ) commitSig1 = types.NewCommitSigForBlock( []byte("Signature2"), state.Validators.Validators[1].Address, - now) + now, + types.VoteExtensionToSign{}, + ) absentSig = 
types.NewCommitSigAbsent() ) @@ -101,18 +116,11 @@ func TestBeginBlockValidators(t *testing.T) { lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) // block for height 2 - block, _ := state.MakeBlock( - 2, - testfactory.MakeTenTxs(2), - nil, - nil, - nil, - lastCommit, - state.Validators.GetProposer().Address, - ) + block, err := sf.MakeBlock(state, 2, lastCommit) + require.NoError(t, err) - _, err = sm.ExecCommitBlock(nil, proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1, state) - require.Nil(t, err, tc.desc) + _, err = sm.ExecCommitBlock(ctx, nil, proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1, state) + require.NoError(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they signed ctr := 0 @@ -131,14 +139,16 @@ func TestBeginBlockValidators(t *testing.T) { // TestBeginBlockByzantineValidators ensures we send byzantine validators list. func TestBeginBlockByzantineValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + proxyApp := proxy.NewAppConns(cc, log.TestingLogger(), proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) - state, stateDB, privVals := makeState(1, 1) + state, stateDB, privVals := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) @@ -162,7 +172,8 @@ func TestBeginBlockByzantineValidators(t *testing.T) { } // we don't need to worry about validating the evidence as long as they pass validate basic - dve := types.NewMockDuplicateVoteEvidenceWithValidator(3, defaultEvidenceTime, privVal, state.ChainID) + dve, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 3, defaultEvidenceTime, privVal, 
state.ChainID) + require.NoError(t, err) dve.ValidatorPower = 1000 lcae := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -212,13 +223,17 @@ func TestBeginBlockByzantineValidators(t *testing.T) { blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, evpool, blockStore) - block := sf.MakeBlock(state, 1, new(types.Commit)) + block, err := sf.MakeBlock(state, 1, new(types.Commit)) + require.NoError(t, err) block.Evidence = types.EvidenceData{Evidence: ev} block.Header.EvidenceHash = block.Evidence.Hash() - blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - _, err = blockExec.ApplyBlock(state, blockID, block) - require.Nil(t, err) + _, err = blockExec.ApplyBlock(ctx, state, blockID, block) + require.NoError(t, err) // TODO check state and mempool assert.Equal(t, abciEv, app.ByzantineValidators) @@ -356,42 +371,47 @@ func TestUpdateValidators(t *testing.T) { // TestEndBlockValidatorUpdates ensures we update validator set and send an event. 
func TestEndBlockValidatorUpdates(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), + logger, proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore, ) - eventBus := types.NewEventBus() - err = eventBus.Start() + eventBus := eventbus.NewDefault(logger) + err = eventBus.Start(ctx) require.NoError(t, err) defer eventBus.Stop() //nolint:errcheck // ignore for tests blockExec.SetEventBus(eventBus) - updatesSub, err := eventBus.Subscribe( - context.Background(), - "TestEndBlockValidatorUpdates", - types.EventQueryValidatorSetUpdates, - ) + updatesSub, err := eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "TestEndBlockValidatorUpdates", + Query: types.EventQueryValidatorSetUpdates, + }) require.NoError(t, err) - block := sf.MakeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := sf.MakeBlock(state, 1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} pubkey := ed25519.GenPrivKey().PubKey() pk, err := encoding.PubKeyToProto(pubkey) @@ -400,8 +420,8 @@ func TestEndBlockValidatorUpdates(t *testing.T) { {PubKey: pk, Power: 10}, } - state, err = 
blockExec.ApplyBlock(state, blockID, block) - require.Nil(t, err) + state, err = blockExec.ApplyBlock(ctx, state, blockID, block) + require.NoError(t, err) // test new validator was added to NextValidators if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { idx, _ := state.NextValidators.GetByAddress(pubkey.Address()) @@ -411,32 +431,32 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } // test we threw an event - select { - case msg := <-updatesSub.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdates) - require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) - if assert.NotEmpty(t, event.ValidatorUpdates) { - assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) - assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) - } - case <-updatesSub.Canceled(): - t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err()) - case <-time.After(1 * time.Second): - t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := updatesSub.Next(ctx) + require.NoError(t, err) + event, ok := msg.Data().(types.EventDataValidatorSetUpdates) + require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) + if assert.NotEmpty(t, event.ValidatorUpdates) { + assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) + assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) } } // TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that // would result in empty set causes no panic, an error is raised and NextValidators is not updated func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - 
require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.TestingLogger() + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( @@ -448,8 +468,11 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { blockStore, ) - block := sf.MakeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := sf.MakeBlock(state, 1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} vp, err := encoding.PubKeyToProto(state.Validators.Validators[0].PubKey) require.NoError(t, err) @@ -458,8 +481,8 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { {PubKey: vp, Power: 0}, } - assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) - assert.NotNil(t, err) + assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(ctx, state, blockID, block) }) + assert.Error(t, err) assert.NotEmpty(t, state.NextValidators.Validators) } diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index e5a3a32fbb..f228672b2e 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -2,9 +2,13 @@ package state_test import ( "bytes" + "context" "fmt" + "math/rand" + "testing" "time" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" @@ -16,6 +20,7 @@ import ( sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" 
"github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -31,73 +36,75 @@ type paramsChangeTestCase struct { func newTestApp() proxy.AppConns { app := &testApp{} cc := abciclient.NewLocalCreator(app) - return proxy.NewAppConns(cc, proxy.NopMetrics()) + return proxy.NewAppConns(cc, log.NewNopLogger(), proxy.NopMetrics()) } func makeAndCommitGoodBlock( + ctx context.Context, + t *testing.T, state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, blockExec *sm.BlockExecutor, privVals map[string]types.PrivValidator, - evidence []types.Evidence) (sm.State, types.BlockID, *types.Commit, error) { + evidence []types.Evidence, +) (sm.State, types.BlockID, *types.Commit) { + t.Helper() + // A good block passes - state, blockID, err := makeAndApplyGoodBlock(state, height, lastCommit, proposerAddr, blockExec, evidence) - if err != nil { - return state, types.BlockID{}, nil, err - } + state, blockID := makeAndApplyGoodBlock(ctx, t, state, height, lastCommit, proposerAddr, blockExec, evidence) // Simulate a lastCommit for this block from all validators for the next height - commit, err := makeValidCommit(height, blockID, state.Validators, privVals) - if err != nil { - return state, types.BlockID{}, nil, err - } - return state, blockID, commit, nil + commit := makeValidCommit(ctx, t, height, blockID, state.Validators, privVals) + + return state, blockID, commit } -func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, - blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) { - block, _ := state.MakeBlock( - height, - factory.MakeTenTxs(height), - evidence, - nil, - nil, - lastCommit, - proposerAddr, - ) - if err := blockExec.ValidateBlock(state, block); err != 
nil { - return state, types.BlockID{}, err - } +func makeAndApplyGoodBlock( + ctx context.Context, + t *testing.T, + state sm.State, + height int64, + lastCommit *types.Commit, + proposerAddr []byte, + blockExec *sm.BlockExecutor, + evidence []types.Evidence, +) (sm.State, types.BlockID) { + t.Helper() + block, _, err := state.MakeBlock(height, factory.MakeTenTxs(height), evidence, nil, nil, lastCommit, proposerAddr) + require.NoError(t, err) + + require.NoError(t, blockExec.ValidateBlock(state, block)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} - state, err := blockExec.ApplyBlock(state, blockID, block) - if err != nil { - return state, types.BlockID{}, err - } - return state, blockID, nil + state, err = blockExec.ApplyBlock(ctx, state, blockID, block) + require.NoError(t, err) + + return state, blockID } func makeValidCommit( + ctx context.Context, + t *testing.T, height int64, blockID types.BlockID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator, -) (*types.Commit, error) { +) *types.Commit { + t.Helper() sigs := make([]types.CommitSig, 0) for i := 0; i < vals.Size(); i++ { _, val := vals.GetByIndex(int32(i)) - vote, err := factory.MakeVote(privVals[val.Address.String()], chainID, int32(i), height, 0, 2, blockID, time.Now()) - if err != nil { - return nil, err - } + vote, err := factory.MakeVote(ctx, privVals[val.Address.String()], chainID, int32(i), height, 0, 2, blockID, time.Now()) + require.NoError(t, err) sigs = append(sigs, vote.CommitSig()) } - return types.NewCommit(height, 0, blockID, sigs), nil + + return types.NewCommit(height, 0, blockID, sigs) } -func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { +func makeState(t *testing.T, nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { vals := make([]types.GenesisValidator, nVals) privVals := make(map[string]types.PrivValidator, nVals) for i := 0; i < nVals; i++ { 
@@ -120,16 +127,13 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) for i := 1; i < height; i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - } + + require.NoError(t, stateStore.Save(s)) } return s, stateDB, privVals @@ -144,11 +148,13 @@ func genValSet(size int) *types.ValidatorSet { } func makeHeaderPartsResponsesValPubKeyChange( + t *testing.T, state sm.State, pubkey crypto.PubKey, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -157,13 +163,10 @@ func makeHeaderPartsResponsesValPubKeyChange( _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { vPbPk, err := encoding.PubKeyToProto(val.PubKey) - if err != nil { - panic(err) - } + require.NoError(t, err) pbPk, err := encoding.PubKeyToProto(pubkey) - if err != nil { - panic(err) - } + require.NoError(t, err) + abciResponses.EndBlock = &abci.ResponseEndBlock{ ValidatorUpdates: []abci.ValidatorUpdate{ {PubKey: vPbPk, Power: 0}, @@ -176,11 +179,15 @@ func makeHeaderPartsResponsesValPubKeyChange( } func makeHeaderPartsResponsesValPowerChange( + t *testing.T, state sm.State, power int64, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { + t.Helper() + + block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) abciResponses := &tmstate.ABCIResponses{ 
BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -190,9 +197,8 @@ func makeHeaderPartsResponsesValPowerChange( _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { vPbPk, err := encoding.PubKeyToProto(val.PubKey) - if err != nil { - panic(err) - } + require.NoError(t, err) + abciResponses.EndBlock = &abci.ResponseEndBlock{ ValidatorUpdates: []abci.ValidatorUpdate{ {PubKey: vPbPk, Power: power}, @@ -204,11 +210,14 @@ func makeHeaderPartsResponsesValPowerChange( } func makeHeaderPartsResponsesParams( + t *testing.T, state sm.State, params *types.ConsensusParams, ) (types.Header, types.BlockID, *tmstate.ABCIResponses) { + t.Helper() - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) pbParams := params.ToProto() abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, @@ -250,10 +259,16 @@ func makeRandomStateFromValidatorSet( InitialHeight: 1, } } - -func makeRandomStateFromConsensusParams(consensusParams *types.ConsensusParams, - height, lastHeightConsensusParamsChanged int64) sm.State { - val, _ := factory.RandValidator(true, 10) +func makeRandomStateFromConsensusParams( + ctx context.Context, + t *testing.T, + consensusParams *types.ConsensusParams, + height, + lastHeightConsensusParamsChanged int64, +) sm.State { + t.Helper() + val, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) valSet := types.NewValidatorSet([]*types.Validator{val}) return sm.State{ LastBlockHeight: height - 1, diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index d52f06c966..2ac133bf1a 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -12,8 +12,9 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + 
"github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -91,10 +92,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, default: } - conditions, err := q.Conditions() - if err != nil { - return nil, fmt.Errorf("failed to parse query conditions: %w", err) - } + conditions := q.Syntax() // If there is an exact height query, return the result immediately // (if it exists). @@ -158,7 +156,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, continue } - startKey, err := orderedcode.Append(nil, c.CompositeKey, fmt.Sprintf("%v", c.Operand)) + startKey, err := orderedcode.Append(nil, c.Tag, c.Arg.Value()) if err != nil { return nil, err } @@ -327,7 +325,7 @@ iter: // matched. func (idx *BlockerIndexer) match( ctx context.Context, - c query.Condition, + c syntax.Condition, startKeyBz []byte, filteredHeights map[string][]byte, firstRun bool, @@ -342,7 +340,7 @@ func (idx *BlockerIndexer) match( tmpHeights := make(map[string][]byte) switch { - case c.Op == query.OpEqual: + case c.Op == syntax.TEq: it, err := dbm.IteratePrefix(idx.store, startKeyBz) if err != nil { return nil, fmt.Errorf("failed to create prefix iterator: %w", err) @@ -361,8 +359,8 @@ func (idx *BlockerIndexer) match( return nil, err } - case c.Op == query.OpExists: - prefix, err := orderedcode.Append(nil, c.CompositeKey) + case c.Op == syntax.TExists: + prefix, err := orderedcode.Append(nil, c.Tag) if err != nil { return nil, err } @@ -389,8 +387,8 @@ func (idx *BlockerIndexer) match( return nil, err } - case c.Op == query.OpContains: - prefix, err := orderedcode.Append(nil, c.CompositeKey) + case c.Op == syntax.TContains: + prefix, err := orderedcode.Append(nil, c.Tag) if err != nil { return nil, err } @@ -408,7 +406,7 
@@ func (idx *BlockerIndexer) match( continue } - if strings.Contains(eventValue, c.Operand.(string)) { + if strings.Contains(eventValue, c.Arg.Value()) { tmpHeights[string(it.Value())] = it.Value() } diff --git a/internal/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go index 024df332c2..8878e0f088 100644 --- a/internal/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -9,8 +9,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -94,39 +94,39 @@ func TestBlockIndexer(t *testing.T) { results []int64 }{ "block.height = 100": { - q: query.MustParse("block.height = 100"), + q: query.MustCompile(`block.height = 100`), results: []int64{}, }, "block.height = 5": { - q: query.MustParse("block.height = 5"), + q: query.MustCompile(`block.height = 5`), results: []int64{5}, }, "begin_event.key1 = 'value1'": { - q: query.MustParse("begin_event.key1 = 'value1'"), + q: query.MustCompile(`begin_event.key1 = 'value1'`), results: []int64{}, }, "begin_event.proposer = 'FCAA001'": { - q: query.MustParse("begin_event.proposer = 'FCAA001'"), + q: query.MustCompile(`begin_event.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, "end_event.foo <= 5": { - q: query.MustParse("end_event.foo <= 5"), + q: query.MustCompile(`end_event.foo <= 5`), results: []int64{2, 4}, }, "end_event.foo >= 100": { - q: query.MustParse("end_event.foo >= 100"), + q: query.MustCompile(`end_event.foo >= 100`), results: []int64{1}, }, "block.height > 2 AND end_event.foo <= 8": { - q: query.MustParse("block.height > 2 AND end_event.foo <= 8"), + q: query.MustCompile(`block.height > 2 AND end_event.foo <= 8`), results: []int64{4, 6, 8}, }, 
"begin_event.proposer CONTAINS 'FFFFFFF'": { - q: query.MustParse("begin_event.proposer CONTAINS 'FFFFFFF'"), + q: query.MustCompile(`begin_event.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, "begin_event.proposer CONTAINS 'FCAA001'": { - q: query.MustParse("begin_event.proposer CONTAINS 'FCAA001'"), + q: query.MustCompile(`begin_event.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, } @@ -134,7 +134,10 @@ func TestBlockIndexer(t *testing.T) { for name, tc := range testCases { tc := tc t.Run(name, func(t *testing.T) { - results, err := indexer.Search(context.Background(), tc.q) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + results, err := indexer.Search(ctx, tc.q) require.NoError(t, err) require.Equal(t, tc.results, results) }) diff --git a/internal/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go index c0b88018e0..28e22718c5 100644 --- a/internal/state/indexer/block/kv/util.go +++ b/internal/state/indexer/block/kv/util.go @@ -6,7 +6,7 @@ import ( "strconv" "github.com/google/orderedcode" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) @@ -85,10 +85,10 @@ func parseValueFromEventKey(key []byte) (string, error) { return eventValue, nil } -func lookForHeight(conditions []query.Condition) (int64, bool) { +func lookForHeight(conditions []syntax.Condition) (int64, bool) { for _, c := range conditions { - if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual { - return c.Operand.(int64), true + if c.Tag == types.BlockHeightKey && c.Op == syntax.TEq { + return int64(c.Arg.Number()), true } } diff --git a/internal/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go index 9b28d93bba..7d5453848e 100644 --- a/internal/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -4,8 +4,8 @@ import ( 
"context" "errors" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go index dba50b6af7..9b4d6f5614 100644 --- a/internal/state/indexer/eventsink.go +++ b/internal/state/indexer/eventsink.go @@ -4,7 +4,7 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/indexer.go b/internal/state/indexer/indexer.go index 24dc62d70e..a1b78a257c 100644 --- a/internal/state/indexer/indexer.go +++ b/internal/state/indexer/indexer.go @@ -5,7 +5,7 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -41,26 +41,26 @@ type BlockIndexer interface { // Batch groups together multiple Index operations to be performed at the same time. // NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { - Ops []*abci.TxResult + Ops []*abci.TxResult + Pending int64 } // NewBatch creates a new Batch. func NewBatch(n int64) *Batch { - return &Batch{ - Ops: make([]*abci.TxResult, n), - } + return &Batch{Ops: make([]*abci.TxResult, n), Pending: n} } // Add or update an entry for the given result.Index. func (b *Batch) Add(result *abci.TxResult) error { - b.Ops[result.Index] = result + if b.Ops[result.Index] == nil { + b.Pending-- + b.Ops[result.Index] = result + } return nil } // Size returns the total number of operations inside the batch. 
-func (b *Batch) Size() int { - return len(b.Ops) -} +func (b *Batch) Size() int { return len(b.Ops) } // ErrorEmptyHash indicates empty hash var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index 39a1847f82..e73e4a3ba2 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -2,118 +2,152 @@ package indexer import ( "context" + "time" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) -// XXX/TODO: These types should be moved to the indexer package. - -const ( - subscriber = "IndexerService" -) - // Service connects event bus, transaction and block indexers together in // order to index transactions and blocks coming from the event bus. type Service struct { service.BaseService + logger log.Logger eventSinks []EventSink - eventBus *types.EventBus -} + eventBus *eventbus.EventBus + metrics *Metrics -// NewIndexerService returns a new service instance. -func NewIndexerService(es []EventSink, eventBus *types.EventBus) *Service { + currentBlock struct { + header types.EventDataNewBlockHeader + height int64 + batch *Batch + } +} - is := &Service{eventSinks: es, eventBus: eventBus} - is.BaseService = *service.NewBaseService(nil, "IndexerService", is) +// NewService constructs a new indexer service from the given arguments. 
+func NewService(args ServiceArgs) *Service { + is := &Service{ + logger: args.Logger, + eventSinks: args.Sinks, + eventBus: args.EventBus, + metrics: args.Metrics, + } + if is.metrics == nil { + is.metrics = NopMetrics() + } + is.BaseService = *service.NewBaseService(args.Logger, "IndexerService", is) return is } -// OnStart implements service.Service by subscribing for all transactions -// and indexing them by events. -func (is *Service) OnStart() error { - // Use SubscribeUnbuffered here to ensure both subscriptions does not get - // canceled due to not pulling messages fast enough. Cause this might - // sometimes happen when there are no other subscribers. - blockHeadersSub, err := is.eventBus.SubscribeUnbuffered( - context.Background(), - subscriber, - types.EventQueryNewBlockHeader) - if err != nil { - return err +// publish publishes a pubsub message to the service. The service blocks until +// the message has been fully processed. +func (is *Service) publish(msg pubsub.Message) error { + // Indexing has three states. Initially, no block is in progress (WAIT) and + // we expect a block header. Upon seeing a header, we are waiting for zero + // or more transactions (GATHER). Once all the expected transactions have + // been delivered (in some order), we are ready to index. After indexing a + // block, we revert to the WAIT state for the next block. + + if is.currentBlock.batch == nil { + // WAIT: Start a new block. + hdr := msg.Data().(types.EventDataNewBlockHeader) + is.currentBlock.header = hdr + is.currentBlock.height = hdr.Header.Height + is.currentBlock.batch = NewBatch(hdr.NumTxs) + + if hdr.NumTxs != 0 { + return nil + } + // If the block does not expect any transactions, fall through and index + // it immediately. This shouldn't happen, but this check ensures we do + // not get stuck if it does. 
} - txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx) - if err != nil { - return err - } + curr := is.currentBlock.batch + if curr.Pending != 0 { + // GATHER: Accumulate a transaction into the current block's batch. + txResult := msg.Data().(types.EventDataTx).TxResult + if err := curr.Add(&txResult); err != nil { + is.logger.Error("failed to add tx to batch", + "height", is.currentBlock.height, "index", txResult.Index, "err", err) + } - go func() { - for { - select { - case <-blockHeadersSub.Canceled(): - return - case msg := <-blockHeadersSub.Out(): - - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) - - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult - - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) - } - } + // This may have been the last transaction in the batch, so fall through + // to check whether it is time to index. + } - if !IndexingEnabled(is.eventSinks) { - continue - } + if curr.Pending == 0 { + // INDEX: We have all the transactions we expect for the current block. 
+ for _, sink := range is.eventSinks { + start := time.Now() + if err := sink.IndexBlockEvents(is.currentBlock.header); err != nil { + is.logger.Error("failed to index block header", + "height", is.currentBlock.height, "err", err) + } else { + is.metrics.BlockEventsSeconds.Observe(time.Since(start).Seconds()) + is.metrics.BlocksIndexed.Add(1) + is.logger.Debug("indexed block", + "height", is.currentBlock.height, "sink", sink.Type()) + } - for _, sink := range is.eventSinks { - if err := sink.IndexBlockEvents(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - } else { - is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) - } - - if len(batch.Ops) > 0 { - err := sink.IndexTxEvents(batch.Ops) - if err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) - } else { - is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) - } - } + if curr.Size() != 0 { + start := time.Now() + err := sink.IndexTxEvents(curr.Ops) + if err != nil { + is.logger.Error("failed to index block txs", + "height", is.currentBlock.height, "err", err) + } else { + is.metrics.TxEventsSeconds.Observe(time.Since(start).Seconds()) + is.metrics.TransactionsIndexed.Add(float64(curr.Size())) + is.logger.Debug("indexed txs", + "height", is.currentBlock.height, "sink", sink.Type()) } } } - }() + is.currentBlock.batch = nil // return to the WAIT state for the next block + } + return nil } -// OnStop implements service.Service by unsubscribing from all transactions and -// close the eventsink. -func (is *Service) OnStop() { - if is.eventBus.IsRunning() { - _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) +// OnStart implements part of service.Service. It registers an observer for the +// indexer if the underlying event sinks support indexing. +// +// TODO(creachadair): Can we get rid of the "enabled" check? 
+func (is *Service) OnStart(ctx context.Context) error { + // If the event sinks support indexing, register an observer to capture + // block header data for the indexer. + if IndexingEnabled(is.eventSinks) { + err := is.eventBus.Observe(ctx, is.publish, + types.EventQueryNewBlockHeader, types.EventQueryTx) + if err != nil { + return err + } } + return nil +} +// OnStop implements service.Service by closing the event sinks. +func (is *Service) OnStop() { for _, sink := range is.eventSinks { if err := sink.Stop(); err != nil { - is.Logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err) + is.logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err) } } } +// ServiceArgs are arguments for constructing a new indexer service. +type ServiceArgs struct { + Sinks []EventSink + EventBus *eventbus.EventBus + Metrics *Metrics + Logger log.Logger +} + // KVSinkEnabled returns the given eventSinks is containing KVEventSink. func KVSinkEnabled(sinks []EventSink) bool { for _, sink := range sinks { diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index d9f29b6773..47be1e28e7 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -1,24 +1,25 @@ package indexer_test import ( + "context" "database/sql" "fmt" - "io/ioutil" "os" "testing" "time" "github.com/adlio/schema" - dockertest "github.com/ory/dockertest" + "github.com/ory/dockertest" "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" - kv "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" - psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/eventbus" + 
"github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" tmlog "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" @@ -39,41 +40,38 @@ var ( ) func TestIndexerServiceIndexesBlocks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := tmlog.TestingLogger() // event bus - eventBus := types.NewEventBus() - eventBus.SetLogger(tmlog.TestingLogger()) - err := eventBus.Start() + eventBus := eventbus.NewDefault(logger) + err := eventBus.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(eventBus.Wait) assert.False(t, indexer.KVSinkEnabled([]indexer.EventSink{})) assert.False(t, indexer.IndexingEnabled([]indexer.EventSink{})) // event sink setup pool, err := setupDB(t) - assert.Nil(t, err) + assert.NoError(t, err) store := dbm.NewMemDB() eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink} assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks)) - service := indexer.NewIndexerService(eventSinks, eventBus) - service.SetLogger(tmlog.TestingLogger()) - err = service.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := service.Stop(); err != nil { - t.Error(err) - } + service := indexer.NewService(indexer.ServiceArgs{ + Logger: logger, + Sinks: eventSinks, + EventBus: eventBus, }) + require.NoError(t, service.Start(ctx)) + t.Cleanup(service.Wait) // publish block with txs - err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + err = eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, NumTxs: int64(2), }) @@ -84,7 +82,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Tx: types.Tx("foo"), Result: abci.ResponseDeliverTx{Code: 0}, 
} - err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) + err = eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: *txResult1}) require.NoError(t, err) txResult2 := &abci.TxResult{ Height: 1, @@ -92,7 +90,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Tx: types.Tx("bar"), Result: abci.ResponseDeliverTx{Code: 0}, } - err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) + err = eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: *txResult2}) require.NoError(t, err) time.Sleep(100 * time.Millisecond) @@ -114,7 +112,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { func readSchema() ([]*schema.Migration, error) { filename := "./sink/psql/schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } @@ -128,17 +126,17 @@ func readSchema() ([]*schema.Migration, error) { func resetDB(t *testing.T) { q := "DROP TABLE IF EXISTS block_events,tx_events,tx_results" _, err := psqldb.Exec(q) - assert.Nil(t, err) + assert.NoError(t, err) q = "DROP TYPE IF EXISTS block_event_type" _, err = psqldb.Exec(q) - assert.Nil(t, err) + assert.NoError(t, err) } func setupDB(t *testing.T) (*dockertest.Pool, error) { t.Helper() pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) - assert.Nil(t, err) + assert.NoError(t, err) resource, err = pool.RunWithOptions(&dockertest.RunOptions{ Repository: "postgres", @@ -158,7 +156,7 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { } }) - assert.Nil(t, err) + assert.NoError(t, err) // Set the container to expire in a minute to avoid orphaned containers // hanging around @@ -180,10 +178,11 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { resetDB(t) sm, err := readSchema() - assert.Nil(t, err) + assert.NoError(t, err) - err = schema.NewMigrator().Apply(psqldb, sm) - assert.Nil(t, err) + migrator := schema.NewMigrator() + err = 
migrator.Apply(psqldb, sm) + assert.NoError(t, err) return pool, nil } diff --git a/internal/state/indexer/metrics.go b/internal/state/indexer/metrics.go new file mode 100644 index 0000000000..aa64a4bb2b --- /dev/null +++ b/internal/state/indexer/metrics.go @@ -0,0 +1,73 @@ +package indexer + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +// MetricsSubsystem is a the subsystem label for the indexer package. +const MetricsSubsystem = "indexer" + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // Latency for indexing block events. + BlockEventsSeconds metrics.Histogram + + // Latency for indexing transaction events. + TxEventsSeconds metrics.Histogram + + // Number of complete blocks indexed. + BlocksIndexed metrics.Counter + + // Number of transactions indexed. + TransactionsIndexed metrics.Counter +} + +// PrometheusMetrics returns Metrics build using Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). 
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + BlockEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_events_seconds", + Help: "Latency for indexing block events.", + }, labels).With(labelsAndValues...), + TxEventsSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_events_seconds", + Help: "Latency for indexing transaction events.", + }, labels).With(labelsAndValues...), + BlocksIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "blocks_indexed", + Help: "Number of complete blocks indexed.", + }, labels).With(labelsAndValues...), + TransactionsIndexed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "transactions_indexed", + Help: "Number of transactions indexed.", + }, labels).With(labelsAndValues...), + } +} + +// NopMetrics returns an indexer metrics stub that discards all samples. 
+func NopMetrics() *Metrics { + return &Metrics{ + BlockEventsSeconds: discard.NewHistogram(), + TxEventsSeconds: discard.NewHistogram(), + BlocksIndexed: discard.NewCounter(), + TransactionsIndexed: discard.NewCounter(), + } +} diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index 98b32e9350..d5555a4170 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" indexer "github.com/tendermint/tendermint/internal/state/indexer" - query "github.com/tendermint/tendermint/libs/pubsub/query" + query "github.com/tendermint/tendermint/internal/pubsub/query" tenderminttypes "github.com/tendermint/tendermint/types" diff --git a/internal/state/indexer/query_range.go b/internal/state/indexer/query_range.go index b4edf53c54..ff54cd32b8 100644 --- a/internal/state/indexer/query_range.go +++ b/internal/state/indexer/query_range.go @@ -3,7 +3,7 @@ package indexer import ( "time" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) // QueryRanges defines a mapping between a composite event key and a QueryRange. @@ -77,32 +77,32 @@ func (qr QueryRange) UpperBoundValue() interface{} { // LookForRanges returns a mapping of QueryRanges and the matching indexes in // the provided query conditions. 
-func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes []int) { +func LookForRanges(conditions []syntax.Condition) (ranges QueryRanges, indexes []int) { ranges = make(QueryRanges) for i, c := range conditions { if IsRangeOperation(c.Op) { - r, ok := ranges[c.CompositeKey] + r, ok := ranges[c.Tag] if !ok { - r = QueryRange{Key: c.CompositeKey} + r = QueryRange{Key: c.Tag} } switch c.Op { - case query.OpGreater: - r.LowerBound = c.Operand + case syntax.TGt: + r.LowerBound = conditionArg(c) - case query.OpGreaterEqual: + case syntax.TGeq: r.IncludeLowerBound = true - r.LowerBound = c.Operand + r.LowerBound = conditionArg(c) - case query.OpLess: - r.UpperBound = c.Operand + case syntax.TLt: + r.UpperBound = conditionArg(c) - case query.OpLessEqual: + case syntax.TLeq: r.IncludeUpperBound = true - r.UpperBound = c.Operand + r.UpperBound = conditionArg(c) } - ranges[c.CompositeKey] = r + ranges[c.Tag] = r indexes = append(indexes, i) } } @@ -112,12 +112,26 @@ func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes [] // IsRangeOperation returns a boolean signifying if a query Operator is a range // operation or not. 
-func IsRangeOperation(op query.Operator) bool { +func IsRangeOperation(op syntax.Token) bool { switch op { - case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: + case syntax.TGt, syntax.TGeq, syntax.TLt, syntax.TLeq: return true default: return false } } + +func conditionArg(c syntax.Condition) interface{} { + if c.Arg == nil { + return nil + } + switch c.Arg.Type { + case syntax.TNumber: + return int64(c.Arg.Number()) + case syntax.TTime, syntax.TDate: + return c.Arg.Time() + default: + return c.Arg.Value() // string + } +} diff --git a/internal/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go index fe7068a1b6..10282fd340 100644 --- a/internal/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -6,10 +6,10 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index 7d75529465..b6436dafd9 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -111,39 +111,39 @@ func TestBlockFuncs(t *testing.T) { results []int64 }{ "block.height = 
100": { - q: query.MustParse("block.height = 100"), + q: query.MustCompile(`block.height = 100`), results: []int64{}, }, "block.height = 5": { - q: query.MustParse("block.height = 5"), + q: query.MustCompile(`block.height = 5`), results: []int64{5}, }, "begin_event.key1 = 'value1'": { - q: query.MustParse("begin_event.key1 = 'value1'"), + q: query.MustCompile(`begin_event.key1 = 'value1'`), results: []int64{}, }, "begin_event.proposer = 'FCAA001'": { - q: query.MustParse("begin_event.proposer = 'FCAA001'"), + q: query.MustCompile(`begin_event.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, "end_event.foo <= 5": { - q: query.MustParse("end_event.foo <= 5"), + q: query.MustCompile(`end_event.foo <= 5`), results: []int64{2, 4}, }, "end_event.foo >= 100": { - q: query.MustParse("end_event.foo >= 100"), + q: query.MustCompile(`end_event.foo >= 100`), results: []int64{1}, }, "block.height > 2 AND end_event.foo <= 8": { - q: query.MustParse("block.height > 2 AND end_event.foo <= 8"), + q: query.MustCompile(`block.height > 2 AND end_event.foo <= 8`), results: []int64{4, 6, 8}, }, "begin_event.proposer CONTAINS 'FFFFFFF'": { - q: query.MustParse("begin_event.proposer CONTAINS 'FFFFFFF'"), + q: query.MustCompile(`begin_event.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, "begin_event.proposer CONTAINS 'FCAA001'": { - q: query.MustParse("begin_event.proposer CONTAINS 'FCAA001'"), + q: query.MustCompile(`begin_event.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, } @@ -151,7 +151,10 @@ func TestBlockFuncs(t *testing.T) { for name, tc := range testCases { tc := tc t.Run(name, func(t *testing.T) { - results, err := indexer.SearchBlockEvents(context.Background(), tc.q) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + results, err := indexer.SearchBlockEvents(ctx, tc.q) require.NoError(t, err) require.Equal(t, tc.results, results) }) @@ -175,7 +178,7 @@ func 
TestTxSearchWithCancelation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - results, err := indexer.SearchTxEvents(ctx, query.MustParse("account.number = 1")) + results, err := indexer.SearchTxEvents(ctx, query.MustCompile(`account.number = 1`)) assert.NoError(t, err) assert.Empty(t, results) } @@ -249,7 +252,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.SearchTxEvents(ctx, query.MustParse(tc.q)) + results, err := indexer.SearchTxEvents(ctx, query.MustCompile(tc.q)) require.NoError(t, err) for _, txr := range results { for _, tr := range tc.results { @@ -273,7 +276,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { ctx := context.Background() - results, err := indexer.SearchTxEvents(ctx, query.MustParse("account.number >= 1")) + results, err := indexer.SearchTxEvents(ctx, query.MustCompile(`account.number >= 1`)) assert.NoError(t, err) assert.Len(t, results, 1) @@ -330,7 +333,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { ctx := context.Background() - results, err := indexer.SearchTxEvents(ctx, query.MustParse("account.number >= 1")) + results, err := indexer.SearchTxEvents(ctx, query.MustCompile(`account.number >= 1`)) assert.NoError(t, err) require.Len(t, results, 3) diff --git a/internal/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go index f58142f21e..c436bdf0f1 100644 --- a/internal/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/null/null_test.go 
b/internal/state/indexer/sink/null/null_test.go index 15b77dc55b..6de7669ce0 100644 --- a/internal/state/indexer/sink/null/null_test.go +++ b/internal/state/indexer/sink/null/null_test.go @@ -10,22 +10,25 @@ import ( ) func TestNullEventSink(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nullIndexer := NewEventSink() assert.Nil(t, nullIndexer.IndexTxEvents(nil)) assert.Nil(t, nullIndexer.IndexBlockEvents(types.EventDataNewBlockHeader{})) - val1, err1 := nullIndexer.SearchBlockEvents(context.TODO(), nil) + val1, err1 := nullIndexer.SearchBlockEvents(ctx, nil) assert.Nil(t, val1) - assert.Nil(t, err1) - val2, err2 := nullIndexer.SearchTxEvents(context.TODO(), nil) + assert.NoError(t, err1) + val2, err2 := nullIndexer.SearchTxEvents(ctx, nil) assert.Nil(t, val2) - assert.Nil(t, err2) + assert.NoError(t, err2) val3, err3 := nullIndexer.GetTxByHash(nil) assert.Nil(t, val3) - assert.Nil(t, err3) + assert.NoError(t, err3) val4, err4 := nullIndexer.HasBlock(0) assert.False(t, val4) - assert.Nil(t, err4) + assert.NoError(t, err4) } func TestType(t *testing.T) { diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index 4db6f44359..1208bca192 100644 --- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -11,8 +11,8 @@ import ( "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -39,6 +39,8 @@ func NewEventSink(connStr, chainID string) (*EventSink, error) { db, err := sql.Open(driverName, connStr) if err != nil { return nil, err + } else if err := db.Ping(); err != nil { + return nil, err } return &EventSink{ diff --git a/internal/state/indexer/sink/psql/psql_test.go 
b/internal/state/indexer/sink/psql/psql_test.go index f19bbfba7f..2168eb556f 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -5,7 +5,6 @@ import ( "database/sql" "flag" "fmt" - "io/ioutil" "log" "os" "os/signal" @@ -111,7 +110,9 @@ func TestMain(m *testing.M) { sm, err := readSchema() if err != nil { log.Fatalf("Reading schema: %v", err) - } else if err := schema.NewMigrator().Apply(db, sm); err != nil { + } + migrator := schema.NewMigrator() + if err := migrator.Apply(db, sm); err != nil { log.Fatalf("Applying schema: %v", err) } @@ -144,6 +145,9 @@ func TestType(t *testing.T) { } func TestIndexing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("IndexBlockEvents", func(t *testing.T) { indexer := &EventSink{store: testDB(), chainID: chainID} require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader())) @@ -155,7 +159,7 @@ func TestIndexing(t *testing.T) { verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) }) verifyNotImplemented(t, "block search", func() (bool, error) { - v, err := indexer.SearchBlockEvents(context.Background(), nil) + v, err := indexer.SearchBlockEvents(ctx, nil) return v != nil, err }) @@ -189,7 +193,7 @@ func TestIndexing(t *testing.T) { return txr != nil, err }) verifyNotImplemented(t, "tx search", func() (bool, error) { - txr, err := indexer.SearchTxEvents(context.Background(), nil) + txr, err := indexer.SearchTxEvents(ctx, nil) return txr != nil, err }) @@ -227,7 +231,7 @@ func newTestBlockHeader() types.EventDataNewBlockHeader { // readSchema loads the indexing database schema file func readSchema() ([]*schema.Migration, error) { const filename = "schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } @@ -242,11 +246,11 @@ func readSchema() 
([]*schema.Migration, error) { func resetDatabase(db *sql.DB) error { _, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`) if err != nil { - return fmt.Errorf("dropping tables: %v", err) + return fmt.Errorf("dropping tables: %w", err) } _, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`) if err != nil { - return fmt.Errorf("dropping views: %v", err) + return fmt.Errorf("dropping views: %w", err) } return nil } @@ -278,7 +282,7 @@ SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; txr := new(abci.TxResult) if err := proto.Unmarshal(resultData, txr); err != nil { - return nil, fmt.Errorf("unmarshaling txr: %v", err) + return nil, fmt.Errorf("unmarshaling txr: %w", err) } return txr, nil @@ -309,7 +313,7 @@ SELECT type, height, chain_id FROM `+viewBlockEvents+` `, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows { t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height) } else if err != nil { - t.Fatalf("Database query failed: %v", err) + t.Fatalf("Database query failed: %c", err) } if err := testDB().QueryRow(` @@ -332,7 +336,7 @@ func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) { want := label + " is not supported via the postgres event sink" ok, err := f() assert.False(t, ok) - require.NotNil(t, err) + require.Error(t, err) assert.Equal(t, want, err.Error()) } diff --git a/internal/state/indexer/sink/sink.go b/internal/state/indexer/sink/sink.go index b4c41ec311..cae861416c 100644 --- a/internal/state/indexer/sink/sink.go +++ b/internal/state/indexer/sink/sink.go @@ -13,8 +13,6 @@ import ( // EventSinksFromConfig constructs a slice of indexer.EventSink using the provided // configuration. 
-// -//nolint:lll func EventSinksFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) ([]indexer.EventSink, error) { if len(cfg.TxIndex.Indexer) == 0 { return []indexer.EventSink{null.NewEventSink()}, nil diff --git a/internal/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go index f0550f8f37..1087005232 100644 --- a/internal/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,8 +12,9 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" indexer "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -52,7 +53,7 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { txResult := new(abci.TxResult) err = proto.Unmarshal(rawBytes, txResult) if err != nil { - return nil, fmt.Errorf("error reading TxResult: %v", err) + return nil, fmt.Errorf("error reading TxResult: %w", err) } return txResult, nil @@ -148,10 +149,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes := make(map[string][]byte) // get a list of conditions (like "tx.height > 5") - conditions, err := q.Conditions() - if err != nil { - return nil, fmt.Errorf("error during parsing conditions from query: %w", err) - } + conditions := q.Syntax() // if there is a hash condition, return the result immediately hash, ok, err := lookForHash(conditions) @@ -238,10 +236,10 @@ hashes: return results, nil } -func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) { +func lookForHash(conditions []syntax.Condition) (hash []byte, ok bool, err error) { for _, c := range conditions { - if c.CompositeKey == types.TxHashKey { - decoded, err := hex.DecodeString(c.Operand.(string)) + if c.Tag == types.TxHashKey { + 
decoded, err := hex.DecodeString(c.Arg.Value()) return decoded, true, err } } @@ -249,10 +247,10 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) } // lookForHeight returns a height if there is an "height=X" condition. -func lookForHeight(conditions []query.Condition) (height int64) { +func lookForHeight(conditions []syntax.Condition) (height int64) { for _, c := range conditions { - if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual { - return c.Operand.(int64) + if c.Tag == types.TxHeightKey && c.Op == syntax.TEq { + return int64(c.Arg.Number()) } } return 0 @@ -265,7 +263,7 @@ func lookForHeight(conditions []query.Condition) (height int64) { // NOTE: filteredHashes may be empty if no previous condition has matched. func (txi *TxIndex) match( ctx context.Context, - c query.Condition, + c syntax.Condition, startKeyBz []byte, filteredHashes map[string][]byte, firstRun bool, @@ -279,7 +277,7 @@ func (txi *TxIndex) match( tmpHashes := make(map[string][]byte) switch { - case c.Op == query.OpEqual: + case c.Op == syntax.TEq: it, err := dbm.IteratePrefix(txi.store, startKeyBz) if err != nil { panic(err) @@ -301,10 +299,10 @@ func (txi *TxIndex) match( panic(err) } - case c.Op == query.OpExists: + case c.Op == syntax.TExists: // XXX: can't use startKeyBz here because c.Operand is nil // (e.g. "account.owner//" won't match w/ a single row) - it, err := dbm.IteratePrefix(txi.store, prefixFromCompositeKey(c.CompositeKey)) + it, err := dbm.IteratePrefix(txi.store, prefixFromCompositeKey(c.Tag)) if err != nil { panic(err) } @@ -325,11 +323,11 @@ func (txi *TxIndex) match( panic(err) } - case c.Op == query.OpContains: + case c.Op == syntax.TContains: // XXX: startKey does not apply here. 
// For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" - it, err := dbm.IteratePrefix(txi.store, prefixFromCompositeKey(c.CompositeKey)) + it, err := dbm.IteratePrefix(txi.store, prefixFromCompositeKey(c.Tag)) if err != nil { panic(err) } @@ -341,7 +339,7 @@ func (txi *TxIndex) match( if err != nil { continue } - if strings.Contains(value, c.Operand.(string)) { + if strings.Contains(value, c.Arg.Value()) { tmpHashes[string(it.Value())] = it.Value() } @@ -577,8 +575,8 @@ func prefixFromCompositeKeyAndValue(compositeKey, value string) []byte { } // a small utility function for getting a keys prefix based on a condition and a height -func prefixForCondition(c query.Condition, height int64) []byte { - key := prefixFromCompositeKeyAndValue(c.CompositeKey, fmt.Sprintf("%v", c.Operand)) +func prefixForCondition(c syntax.Condition, height int64) []byte { + key := prefixFromCompositeKeyAndValue(c.Tag, c.Arg.Value()) if height > 0 { var err error key, err = orderedcode.Append(key, height) diff --git a/internal/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go index 3f4e63ee16..7744c31832 100644 --- a/internal/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -4,18 +4,18 @@ import ( "context" "crypto/rand" "fmt" - "io/ioutil" + "os" "testing" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) func BenchmarkTxSearch(b *testing.B) { - dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test") + dbDir, err := os.MkdirTemp("", "benchmark_tx_search_test") if err != nil { b.Errorf("failed to create temporary directory: %s", err) } @@ -60,7 +60,7 @@ func 
BenchmarkTxSearch(b *testing.B) { } } - txQuery := query.MustParse("transfer.address = 'address_43' AND transfer.amount = 50") + txQuery := query.MustCompile(`transfer.address = 'address_43' AND transfer.amount = 50`) b.ResetTimer() diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index c8ab2b0f28..018fe51b48 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -3,7 +3,6 @@ package kv import ( "context" "fmt" - "io/ioutil" "os" "testing" @@ -13,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/state/indexer" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) @@ -132,7 +131,7 @@ func TestTxSearch(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustParse(tc.q)) + results, err := indexer.Search(ctx, query.MustCompile(tc.q)) assert.NoError(t, err) assert.Len(t, results, tc.resultsLength) @@ -158,7 +157,7 @@ func TestTxSearchWithCancelation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - results, err := indexer.Search(ctx, query.MustParse("account.number = 1")) + results, err := indexer.Search(ctx, query.MustCompile(`account.number = 1`)) assert.NoError(t, err) assert.Empty(t, results) } @@ -231,7 +230,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(ctx, query.MustParse(tc.q)) + results, err := indexer.Search(ctx, query.MustCompile(tc.q)) require.NoError(t, err) for _, txr := range results { for _, tr := range tc.results { @@ 
-255,7 +254,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { ctx := context.Background() - results, err := indexer.Search(ctx, query.MustParse("account.number >= 1")) + results, err := indexer.Search(ctx, query.MustCompile(`account.number >= 1`)) assert.NoError(t, err) assert.Len(t, results, 1) @@ -312,7 +311,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { ctx := context.Background() - results, err := indexer.Search(ctx, query.MustParse("account.number >= 1")) + results, err := indexer.Search(ctx, query.MustCompile(`account.number >= 1`)) assert.NoError(t, err) require.Len(t, results, 3) @@ -334,7 +333,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - dir, err := ioutil.TempDir("", "tx_index_db") + dir, err := os.MkdirTemp("", "tx_index_db") require.NoError(b, err) defer os.RemoveAll(dir) diff --git a/internal/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go index 0da7fc6837..dea5d570f8 100644 --- a/internal/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -5,8 +5,8 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" ) var _ indexer.TxIndexer = (*TxIndex)(nil) diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index e66aad071e..563183437f 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -121,6 +121,22 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } +// LoadBlockMetaByHash provides a mock function with given fields: hash +func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { + ret := _m.Called(hash) + + var r0 *types.BlockMeta + if rf, ok := ret.Get(0).(func([]byte) *types.BlockMeta); 
ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlockMeta) + } + } + + return r0 +} + // LoadBlockPart provides a mock function with given fields: height, index func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { ret := _m.Called(height, index) diff --git a/internal/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go index b8a8fc4648..97e3aff76e 100644 --- a/internal/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" indexer "github.com/tendermint/tendermint/internal/state/indexer" - query "github.com/tendermint/tendermint/libs/pubsub/query" + query "github.com/tendermint/tendermint/internal/pubsub/query" tenderminttypes "github.com/tendermint/tendermint/types" diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 4452f9bec3..02c69d3e05 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -29,6 +29,20 @@ func (_m *Store) Bootstrap(_a0 state.State) error { return r0 } +// Close provides a mock function with given fields: +func (_m *Store) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Load provides a mock function with given fields: func (_m *Store) Load() (state.State, error) { ret := _m.Called() diff --git a/internal/state/rollback.go b/internal/state/rollback.go index 6e13da0e29..ea0eff4deb 100644 --- a/internal/state/rollback.go +++ b/internal/state/rollback.go @@ -19,18 +19,35 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { return -1, nil, errors.New("no state found") } - rollbackHeight := invalidState.LastBlockHeight + height := bs.Height() + + // NOTE: persistence of state and blocks don't happen atomically. 
Therefore it is possible that + // when the user stopped the node the state wasn't updated but the blockstore was. In this situation + // we don't need to rollback any state and can just return early + if height == invalidState.LastBlockHeight+1 { + return invalidState.LastBlockHeight, invalidState.AppHash, nil + } + + // If the state store isn't one below nor equal to the blockstore height than this violates the + // invariant + if height != invalidState.LastBlockHeight { + return -1, nil, fmt.Errorf("statestore height (%d) is not one below or equal to blockstore height (%d)", + invalidState.LastBlockHeight, height) + } + + // state store height is equal to blockstore height. We're good to proceed with rolling back state + rollbackHeight := invalidState.LastBlockHeight - 1 rollbackBlock := bs.LoadBlockMeta(rollbackHeight) if rollbackBlock == nil { return -1, nil, fmt.Errorf("block at height %d not found", rollbackHeight) } - previousValidatorSet, err := ss.LoadValidators(rollbackHeight - 1) + previousLastValidatorSet, err := ss.LoadValidators(rollbackHeight) if err != nil { return -1, nil, err } - previousParams, err := ss.LoadConsensusParams(rollbackHeight) + previousParams, err := ss.LoadConsensusParams(rollbackHeight + 1) if err != nil { return -1, nil, err } @@ -38,13 +55,13 @@ func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { valChangeHeight := invalidState.LastHeightValidatorsChanged // this can only happen if the validator set changed since the last block if valChangeHeight > rollbackHeight { - valChangeHeight = rollbackHeight + valChangeHeight = rollbackHeight + 1 } paramsChangeHeight := invalidState.LastHeightConsensusParamsChanged // this can only happen if params changed from the last block if paramsChangeHeight > rollbackHeight { - paramsChangeHeight = rollbackHeight + paramsChangeHeight = rollbackHeight + 1 } // build the new state from the old state and the prior block @@ -60,13 +77,13 @@ func Rollback(bs BlockStore, ss Store) (int64, 
[]byte, error) { ChainID: invalidState.ChainID, InitialHeight: invalidState.InitialHeight, - LastBlockHeight: invalidState.LastBlockHeight - 1, - LastBlockID: rollbackBlock.Header.LastBlockID, + LastBlockHeight: rollbackBlock.Header.Height, + LastBlockID: rollbackBlock.BlockID, LastBlockTime: rollbackBlock.Header.Time, NextValidators: invalidState.Validators, Validators: invalidState.LastValidators, - LastValidators: previousValidatorSet, + LastValidators: previousLastValidatorSet, LastHeightValidatorsChanged: valChangeHeight, ConsensusParams: previousParams, diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go index ae5c8ee84a..d74889ed06 100644 --- a/internal/state/rollback_test.go +++ b/internal/state/rollback_test.go @@ -1,6 +1,7 @@ package state_test import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -14,75 +15,51 @@ import ( ) func TestRollback(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) - blockStore := &mocks.BlockStore{} var ( - height int64 = 100 - appVersion uint64 = 10 + height int64 = 100 + nextHeight int64 = 101 ) - valSet, _ := factory.RandValidatorSet(5, 10) + blockStore := &mocks.BlockStore{} + stateStore := setupStateStore(t, height) + initialState, err := stateStore.Load() + require.NoError(t, err) - params := types.DefaultConsensusParams() - params.Version.AppVersion = appVersion + // perform the rollback over a version bump newParams := types.DefaultConsensusParams() - newParams.Block.MaxBytes = 10000 - - initialState := state.State{ - Version: state.Version{ - Consensus: version.Consensus{ - Block: version.BlockProtocol, - App: 10, - }, - Software: version.TMVersion, - }, - ChainID: factory.DefaultTestChainID, - InitialHeight: 10, - LastBlockID: factory.MakeBlockID(), - AppHash: factory.RandomHash(), - LastResultsHash: factory.RandomHash(), - LastBlockHeight: height, - LastValidators: valSet, - Validators: valSet.CopyIncrementProposerPriority(1), - NextValidators: 
valSet.CopyIncrementProposerPriority(2), - LastHeightValidatorsChanged: height + 1, - ConsensusParams: *params, - LastHeightConsensusParamsChanged: height + 1, - } - require.NoError(t, stateStore.Bootstrap(initialState)) - - height++ - block := &types.BlockMeta{ - Header: types.Header{ - Height: height, - AppHash: initialState.AppHash, - LastBlockID: initialState.LastBlockID, - LastResultsHash: initialState.LastResultsHash, - }, - } - blockStore.On("LoadBlockMeta", height).Return(block) - - appVersion++ - newParams.Version.AppVersion = appVersion + newParams.Version.AppVersion = 11 + newParams.Block.MaxBytes = 1000 nextState := initialState.Copy() - nextState.LastBlockHeight = height - nextState.Version.Consensus.App = appVersion + nextState.LastBlockHeight = nextHeight + nextState.Version.Consensus.App = 11 nextState.LastBlockID = factory.MakeBlockID() nextState.AppHash = factory.RandomHash() nextState.LastValidators = initialState.Validators nextState.Validators = initialState.NextValidators nextState.NextValidators = initialState.NextValidators.CopyIncrementProposerPriority(1) nextState.ConsensusParams = *newParams - nextState.LastHeightConsensusParamsChanged = height + 1 - nextState.LastHeightValidatorsChanged = height + 1 + nextState.LastHeightConsensusParamsChanged = nextHeight + 1 + nextState.LastHeightValidatorsChanged = nextHeight + 1 // update the state require.NoError(t, stateStore.Save(nextState)) + block := &types.BlockMeta{ + BlockID: initialState.LastBlockID, + Header: types.Header{ + Height: initialState.LastBlockHeight, + AppHash: initialState.AppHash, + LastBlockID: factory.MakeBlockID(), + LastResultsHash: initialState.LastResultsHash, + }, + } + blockStore.On("LoadBlockMeta", initialState.LastBlockHeight).Return(block) + blockStore.On("Height").Return(nextHeight) + // rollback the state rollbackHeight, rollbackHash, err := state.Rollback(blockStore, stateStore) require.NoError(t, err) - require.EqualValues(t, int64(100), rollbackHeight) + 
require.EqualValues(t, height, rollbackHeight) require.EqualValues(t, initialState.AppHash, rollbackHash) blockStore.AssertExpectations(t) @@ -102,19 +79,37 @@ func TestRollbackNoState(t *testing.T) { } func TestRollbackNoBlocks(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) + const height = int64(100) + + stateStore := setupStateStore(t, height) blockStore := &mocks.BlockStore{} - var ( - height int64 = 100 - appVersion uint64 = 10 - ) + blockStore.On("Height").Return(height) + blockStore.On("LoadBlockMeta", height-1).Return(nil) - valSet, _ := factory.RandValidatorSet(5, 10) + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "block at height 99 not found") +} + +func TestRollbackDifferentStateHeight(t *testing.T) { + const height = int64(100) + stateStore := setupStateStore(t, height) + blockStore := &mocks.BlockStore{} + blockStore.On("Height").Return(height + 2) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Equal(t, err.Error(), "statestore height (100) is not one below or equal to blockstore height (102)") +} + +func setupStateStore(t *testing.T, height int64) state.Store { + stateStore := state.NewStore(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + valSet, _ := factory.ValidatorSet(ctx, t, 5, 10) params := types.DefaultConsensusParams() - params.Version.AppVersion = appVersion - newParams := types.DefaultConsensusParams() - newParams.Block.MaxBytes = 10000 + params.Version.AppVersion = 10 initialState := state.State{ Version: state.Version{ @@ -137,10 +132,6 @@ func TestRollbackNoBlocks(t *testing.T) { ConsensusParams: *params, LastHeightConsensusParamsChanged: height + 1, } - require.NoError(t, stateStore.Save(initialState)) - blockStore.On("LoadBlockMeta", height).Return(nil) - - _, _, err := state.Rollback(blockStore, stateStore) - require.Error(t, err) - require.Contains(t, err.Error(), 
"block at height 100 not found") + require.NoError(t, stateStore.Bootstrap(initialState)) + return stateStore } diff --git a/internal/state/services.go b/internal/state/services.go index 49388cc126..2c9d312fb4 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -29,6 +29,7 @@ type BlockStore interface { PruneBlocks(height int64) (uint64, error) LoadBlockByHash(hash []byte) *types.Block + LoadBlockMetaByHash(hash []byte) *types.BlockMeta LoadBlockPart(height int64, index int) *types.Part LoadBlockCommit(height int64) *types.Commit diff --git a/internal/state/state.go b/internal/state/state.go index 1e0a0c3542..e83dca88ea 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -4,12 +4,13 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" - tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmtime "github.com/tendermint/tendermint/libs/time" + tmbytes "github.com/tendermint/tendermint/libs/bytes" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/types" @@ -261,52 +262,26 @@ func (state State) MakeBlock( messages []types.Message, commit *types.Commit, proposerAddress []byte, -) (*types.Block, *types.PartSet) { +) (*types.Block, *types.PartSet, error) { // Build base block with block data. block := types.MakeBlock(height, txs, evidence, intermediateStateRoots, messages, commit) - // Set time. - var timestamp time.Time - if height == state.InitialHeight { - timestamp = state.LastBlockTime // genesis time - } else { - timestamp = MedianTime(commit, state.LastValidators) - } - // Fill rest of header with state data. 
block.Header.Populate( state.Version.Consensus, state.ChainID, - timestamp, state.LastBlockID, + tmtime.Now(), state.LastBlockID, state.Validators.Hash(), state.NextValidators.Hash(), state.ConsensusParams.HashConsensusParams(), state.AppHash, state.LastResultsHash, proposerAddress, ) - return block, block.MakePartSet(types.BlockPartSizeBytes) -} - -// MedianTime computes a median time for a given Commit (based on Timestamp field of votes messages) and the -// corresponding validator set. The computed time is always between timestamps of -// the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the -// computed value. -func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { - weightedTimes := make([]*weightedTime, len(commit.Signatures)) - totalVotingPower := int64(0) - - for i, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue - } - _, validator := validators.GetByAddress(commitSig.ValidatorAddress) - // If there's no condition, TestValidateBlockCommit panics; not needed normally. - if validator != nil { - totalVotingPower += validator.VotingPower - weightedTimes[i] = newWeightedTime(commitSig.Timestamp, validator.VotingPower) - } + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + return nil, nil, err } - return weightedMedian(weightedTimes, totalVotingPower) + return block, bps, nil } //------------------------------------------------------------------------ @@ -326,13 +301,13 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. 
func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { - genDocJSON, err := ioutil.ReadFile(genDocFile) + genDocJSON, err := os.ReadFile(genDocFile) if err != nil { - return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) + return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } genDoc, err := types.GenesisDocFromJSON(genDocJSON) if err != nil { - return nil, fmt.Errorf("error reading GenesisDoc: %v", err) + return nil, fmt.Errorf("error reading GenesisDoc: %w", err) } return genDoc, nil } diff --git a/internal/state/state_test.go b/internal/state/state_test.go index 8c0144abde..d5c6a649de 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -26,7 +26,9 @@ import ( // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { - cfg := config.ResetTestRoot("state_") + cfg, err := config.ResetTestRoot("state_") + require.NoError(t, err) + dbType := dbm.BackendType(cfg.DBBackend) stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(t, err) @@ -49,18 +51,16 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { func TestStateCopy(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) - assert := assert.New(t) stateCopy := state.Copy() - assert.True(state.Equals(stateCopy), - fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", - stateCopy, state)) + assert.True(t, state.Equals(stateCopy), + "expected state and its copy to be identical.\ngot: %v\nexpected: %v", + stateCopy, state) stateCopy.LastBlockHeight++ stateCopy.LastValidators = state.Validators - assert.False(state.Equals(stateCopy), fmt.Sprintf(`expected states to be different. got same - %v`, state)) + assert.False(t, state.Equals(stateCopy), "expected states to be different. 
got same %v", state) } // TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. @@ -71,7 +71,7 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { } require.Nil(t, doc.ValidateAndComplete()) state, err := sm.MakeGenesisState(&doc) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, 0, len(state.Validators.Validators)) require.Equal(t, 0, len(state.NextValidators.Validators)) } @@ -81,7 +81,6 @@ func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) stateStore := sm.NewStore(stateDB) - assert := assert.New(t) state.LastBlockHeight++ state.LastValidators = state.Validators @@ -90,9 +89,9 @@ func TestStateSaveLoad(t *testing.T) { loadedState, err := stateStore.Load() require.NoError(t, err) - assert.True(state.Equals(loadedState), - fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", - loadedState, state)) + assert.True(t, state.Equals(loadedState), + "expected state and its copy to be identical.\ngot: %v\nexpected: %v", + loadedState, state) } // TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. @@ -100,12 +99,12 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) stateStore := sm.NewStore(stateDB) - assert := assert.New(t) state.LastBlockHeight++ // Build mock responses. 
- block := statefactory.MakeBlock(state, 2, new(types.Commit)) + block, err := statefactory.MakeBlock(state, 2, new(types.Commit)) + require.NoError(t, err) abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) @@ -120,17 +119,16 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { err = stateStore.SaveABCIResponses(block.Height, abciResponses) require.NoError(t, err) loadedABCIResponses, err := stateStore.LoadABCIResponses(block.Height) - assert.Nil(err) - assert.Equal(abciResponses, loadedABCIResponses, - fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", - loadedABCIResponses, abciResponses)) + require.NoError(t, err) + assert.Equal(t, abciResponses, loadedABCIResponses, + "ABCIResponses don't match:\ngot: %v\nexpected: %v\n", + loadedABCIResponses, abciResponses) } // TestResultsSaveLoad tests saving and loading ABCI results. func TestABCIResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) - assert := assert.New(t) stateStore := sm.NewStore(stateDB) @@ -183,7 +181,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { for i := range cases { h := int64(i + 1) res, err := stateStore.LoadABCIResponses(h) - assert.Error(err, "%d: %#v", i, res) + assert.Error(t, err, "%d: %#v", i, res) } // Add all cases. 
@@ -202,14 +200,14 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { for i, tc := range cases { h := int64(i + 1) res, err := stateStore.LoadABCIResponses(h) - if assert.NoError(err, "%d", i) { + if assert.NoError(t, err, "%d", i) { t.Log(res) responses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, DeliverTxs: tc.expected, EndBlock: &abci.ResponseEndBlock{}, } - assert.Equal(sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i) + assert.Equal(t, sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i) } } } @@ -218,23 +216,22 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { func TestValidatorSimpleSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - assert := assert.New(t) statestore := sm.NewStore(stateDB) // Can't load anything for height 0. _, err := statestore.LoadValidators(0) - assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") + assert.IsType(t, sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. v, err := statestore.LoadValidators(1) - assert.Nil(err, "expected no err at height 1") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") + require.NoError(t, err, "expected no err at height 1") + assert.Equal(t, v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. v, err = statestore.LoadValidators(2) - assert.Nil(err, "expected no err at height 2") - assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") + require.NoError(t, err, "expected no err at height 2") + assert.Equal(t, v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. 
state.LastBlockHeight++ @@ -242,11 +239,11 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { err = statestore.Save(state) require.NoError(t, err) vp0, err := statestore.LoadValidators(nextHeight + 0) - assert.Nil(err, "expected no err") + assert.NoError(t, err) vp1, err := statestore.LoadValidators(nextHeight + 1) - assert.Nil(err, "expected no err") - assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") - assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") + assert.NoError(t, err) + assert.Equal(t, vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") + assert.Equal(t, vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. @@ -273,7 +270,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { changeIndex++ power++ } - header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power) + header, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, power) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) @@ -298,7 +295,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { for i, power := range testCases { v, err := stateStore.LoadValidators(int64(i + 1 + 1)) // +1 because vset changes delayed by 1 block. 
- assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) + assert.NoError(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -308,6 +305,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } func TestProposerFrequency(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // some explicit test cases testCases := []struct { @@ -368,7 +367,7 @@ func TestProposerFrequency(t *testing.T) { votePower := int64(mrand.Int()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) require.NoError(t, err) val := types.NewValidator(pubKey, votePower) val.ProposerPriority = mrand.Int63() @@ -447,8 +446,11 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -561,8 +563,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: 
block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, @@ -748,8 +753,11 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -777,8 +785,13 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } - block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -792,8 +805,13 @@ 
func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + + bps, err = block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -825,8 +843,12 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } - block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) } @@ -840,8 +862,14 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } - block = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + + block, err = statefactory.MakeBlock(oldState, 
oldState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + + bps, err = block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) @@ -861,8 +889,13 @@ func TestLargeGenesisValidator(t *testing.T) { } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) - blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) { @@ -886,8 +919,13 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block, err := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) + require.NoError(t, err) + + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} updatedState, 
err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -914,11 +952,11 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { nextHeight := state.LastBlockHeight + 1 v0, err := stateStore.LoadValidators(nextHeight) - assert.Nil(t, err) + assert.NoError(t, err) acc0 := v0.Validators[0].ProposerPriority v1, err := stateStore.LoadValidators(nextHeight + 1) - assert.Nil(t, err) + assert.NoError(t, err) acc1 := v1.Validators[0].ProposerPriority assert.NotEqual(t, acc1, acc0, "expected ProposerPriority value to change between heights") @@ -942,21 +980,21 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() // Swap the first validator with a new one (validator set size stays the same). - header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) + header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(t, state, pubkey) // Save state etc. var validatorUpdates []*types.Validator validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) - require.Nil(t, err) + require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 err = stateStore.Save(state) require.NoError(t, err) // Load nextheight, it should be the oldpubkey. v0, err := stateStore.LoadValidators(nextHeight) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByAddress(pubkeyOld.Address()) assert.NotNil(t, val) @@ -966,7 +1004,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Load nextheight+1, it should be the new pubkey. 
v1, err := stateStore.LoadValidators(nextHeight + 1) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByAddress(pubkey.Address()) assert.NotNil(t, val) @@ -981,7 +1019,8 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block := statefactory.MakeBlock(state, 2, new(types.Commit)) + block, err := statefactory.MakeBlock(state, 2, new(types.Commit)) + require.NoError(t, err) // test we set some fields assert.Equal(t, stateVersion, block.Version) @@ -1022,12 +1061,12 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, blockID, responses := makeHeaderPartsResponsesParams(state, &cp) + header, blockID, responses := makeHeaderPartsResponsesParams(t, state, &cp) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) - require.Nil(t, err) + require.NoError(t, err) err := stateStore.Save(state) require.NoError(t, err) } @@ -1049,7 +1088,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { for _, testCase := range testCases { p, err := stateStore.LoadConsensusParams(testCase.height) - assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) + assert.NoError(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) assert.EqualValues(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) } diff --git a/internal/state/store.go b/internal/state/store.go index aff165aa12..de17be0d7f 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -11,7 +11,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" - tmos "github.com/tendermint/tendermint/libs/os" tmstate 
"github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -93,6 +92,8 @@ type Store interface { Bootstrap(State) error // PruneStates takes the height from which to prune up to (exclusive) PruneStates(int64) error + // Close closes the connection with the database + Close() error } // dbStore wraps a db (github.com/tendermint/tm-db) @@ -126,8 +127,7 @@ func (store dbStore) loadState(key []byte) (state State, err error) { err = proto.Unmarshal(buf, sp) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } sm, err := FromProto(sp) @@ -424,8 +424,7 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er err = abciResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has - changed: %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. @@ -544,8 +543,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error err = v.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. 
@@ -632,8 +630,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa paramsInfo := new(tmstate.ConsensusParamsInfo) if err = paramsInfo.Unmarshal(buf); err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf(`data has been corrupted or its spec has changed: %+v`, err)) } // TODO: ensure that buf is completely read. @@ -663,3 +660,7 @@ func (store dbStore) saveConsensusParamsInfo( return batch.Set(consensusParamsKey(nextHeight), bz) } + +func (store dbStore) Close() error { + return store.db.Close() +} diff --git a/internal/state/store_test.go b/internal/state/store_test.go index 118350fff6..d7e5996109 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -1,7 +1,9 @@ package state_test import ( + "context" "fmt" + "math/rand" "os" "testing" @@ -28,13 +30,17 @@ const ( func TestStoreBootstrap(t *testing.T) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - val, _ := factory.RandValidator(true, 10) - val2, _ := factory.RandValidator(true, 10) - val3, _ := factory.RandValidator(true, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + val, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) + val2, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) + val3, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) vals := types.NewValidatorSet([]*types.Validator{val, val2, val3}) bootstrapState := makeRandomStateFromValidatorSet(vals, 100, 100) - err := stateStore.Bootstrap(bootstrapState) - require.NoError(t, err) + require.NoError(t, stateStore.Bootstrap(bootstrapState)) // bootstrap should also save the previous validator _, err = stateStore.LoadValidators(99) @@ -54,17 +60,20 @@ func TestStoreBootstrap(t *testing.T) { func 
TestStoreLoadValidators(t *testing.T) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - val, _ := factory.RandValidator(true, 10) - val2, _ := factory.RandValidator(true, 10) - val3, _ := factory.RandValidator(true, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + val, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) + val2, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) + val3, _, err := factory.Validator(ctx, 10+int64(rand.Uint32())) + require.NoError(t, err) vals := types.NewValidatorSet([]*types.Validator{val, val2, val3}) // 1) LoadValidators loads validators using a height where they were last changed // Note that only the next validators at height h + 1 are saved - err := stateStore.Save(makeRandomStateFromValidatorSet(vals, 1, 1)) - require.NoError(t, err) - err = stateStore.Save(makeRandomStateFromValidatorSet(vals.CopyIncrementProposerPriority(1), 2, 1)) - require.NoError(t, err) + require.NoError(t, stateStore.Save(makeRandomStateFromValidatorSet(vals, 1, 1))) + require.NoError(t, stateStore.Save(makeRandomStateFromValidatorSet(vals.CopyIncrementProposerPriority(1), 2, 1))) loadedVals, err := stateStore.LoadValidators(3) require.NoError(t, err) require.Equal(t, vals.CopyIncrementProposerPriority(3), loadedVals) @@ -101,7 +110,9 @@ func TestStoreLoadValidators(t *testing.T) { func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 - cfg := config.ResetTestRoot("state_") + cfg, err := config.ResetTestRoot("state_") + require.NoError(b, err) + defer os.RemoveAll(cfg.RootDir) dbType := dbm.BackendType(cfg.DBBackend) stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) @@ -139,9 +150,12 @@ func BenchmarkLoadValidators(b *testing.B) { } func TestStoreLoadConsensusParams(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - err 
:= stateStore.Save(makeRandomStateFromConsensusParams(types.DefaultConsensusParams(), 1, 1)) + err := stateStore.Save(makeRandomStateFromConsensusParams(ctx, t, types.DefaultConsensusParams(), 1, 1)) require.NoError(t, err) params, err := stateStore.LoadConsensusParams(1) require.NoError(t, err) @@ -151,7 +165,7 @@ func TestStoreLoadConsensusParams(t *testing.T) { // it should save a pointer to the params at height 1 differentParams := types.DefaultConsensusParams() differentParams.Block.MaxBytes = 20000 - err = stateStore.Save(makeRandomStateFromConsensusParams(differentParams, 10, 1)) + err = stateStore.Save(makeRandomStateFromConsensusParams(ctx, t, differentParams, 10, 1)) require.NoError(t, err) res, err := stateStore.LoadConsensusParams(10) require.NoError(t, err) diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index 5d840035b7..4b263cab83 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -1,15 +1,20 @@ package factory import ( + "context" + "testing" "time" + "github.com/stretchr/testify/require" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/types" ) -func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block { - blocks := make([]*types.Block, 0) +func MakeBlocks(ctx context.Context, t *testing.T, n int, state *sm.State, privVal types.PrivValidator) []*types.Block { + t.Helper() + + blocks := make([]*types.Block, n) var ( prevBlock *types.Block @@ -20,8 +25,9 @@ func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl for i := 0; i < n; i++ { height := int64(i + 1) - block, parts := makeBlockAndPartSet(*state, prevBlock, prevBlockMeta, privVal, height) - blocks = append(blocks, block) + block, parts := makeBlockAndPartSet(ctx, t, *state, prevBlock, prevBlockMeta, privVal, height) + + blocks[i] = block prevBlock = block 
prevBlockMeta = types.NewBlockMeta(block, parts) @@ -35,31 +41,48 @@ func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl return blocks } -func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block { - block, _ := state.MakeBlock( +func MakeBlock(state sm.State, height int64, c *types.Commit) (*types.Block, error) { + block, _, err := state.MakeBlock( height, factory.MakeTenTxs(state.LastBlockHeight), nil, nil, nil, c, state.Validators.GetProposer().Address, ) - return block + if err != nil { + return nil, err + } + + return block, nil } -func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, - privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { +func makeBlockAndPartSet( + ctx context.Context, + t *testing.T, + state sm.State, + lastBlock *types.Block, + lastBlockMeta *types.BlockMeta, + privVal types.PrivValidator, + height int64, +) (*types.Block, *types.PartSet) { + t.Helper() lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) if height > 1 { - vote, _ := factory.MakeVote( + vote, err := factory.MakeVote( + ctx, privVal, lastBlock.Header.ChainID, 1, lastBlock.Header.Height, 0, 2, lastBlockMeta.BlockID, time.Now()) + require.NoError(t, err) lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) } - return state.MakeBlock(height, []types.Tx{}, nil, nil, nil, lastCommit, state.Validators.GetProposer().Address) + block, partSet, err := state.MakeBlock(height, []types.Tx{}, nil, nil, nil, lastCommit, state.Validators.GetProposer().Address) + require.NoError(t, err) + + return block, partSet } diff --git a/internal/state/validation.go b/internal/state/validation.go index fbd285f8a4..900b7b7871 100644 --- a/internal/state/validation.go +++ b/internal/state/validation.go @@ -114,18 +114,11 @@ func validateBlock(state State, block *types.Block) error { state.LastBlockTime, ) } - medianTime 
:= MedianTime(block.LastCommit, state.LastValidators) - if !block.Time.Equal(medianTime) { - return fmt.Errorf("invalid block time. Expected %v, got %v", - medianTime, - block.Time, - ) - } case block.Height == state.InitialHeight: genesisTime := state.LastBlockTime - if !block.Time.Equal(genesisTime) { - return fmt.Errorf("block time %v is not equal to genesis time %v", + if block.Time.Before(genesisTime) { + return fmt.Errorf("block time %v is before genesis time %v", block.Time, genesisTime, ) diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 723e9392ad..534d508da3 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -28,11 +28,13 @@ import ( const validationTestsStopHeight int64 = 10 func TestValidateBlockHeader(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) - state, stateDB, privVals := makeState(3, 1) + state, stateDB, privVals := makeState(t, 3, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( @@ -62,7 +64,6 @@ func TestValidateBlockHeader(t *testing.T) { {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, {"Height wrong", func(block *types.Block) { block.Height += 10 }}, {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 1) }}, - {"Time wrong 2", func(block *types.Block) { block.Time = block.Time.Add(time.Second * 1) }}, {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartSetHeader.Total += 10 }}, {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, @@ -90,9 +91,10 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block := 
statefactory.MakeBlock(state, height, lastCommit) + block, err := statefactory.MakeBlock(state, height, lastCommit) + require.NoError(t, err) tc.malleateBlock(block) - err := blockExec.ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) t.Logf("%s: %v", tc.name, err) require.Error(t, err, tc.name) } @@ -100,31 +102,27 @@ func TestValidateBlockHeader(t *testing.T) { /* A good block passes */ - var err error - state, _, lastCommit, err = makeAndCommitGoodBlock( + state, _, lastCommit = makeAndCommitGoodBlock(ctx, t, state, height, lastCommit, state.Validators.GetProposer().Address, blockExec, privVals, nil) - require.NoError(t, err, "height %d", height) } nextHeight := validationTestsStopHeight - block, _ := state.MakeBlock( - nextHeight, - testfactory.MakeTenTxs(nextHeight), nil, nil, nil, - lastCommit, - state.Validators.GetProposer().Address, - ) + block, err := statefactory.MakeBlock(state, nextHeight, lastCommit) + require.NoError(t, err) state.InitialHeight = nextHeight + 1 - err := blockExec.ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err, "expected an error when state is ahead of block") assert.Contains(t, err.Error(), "lower than initial height") } func TestValidateBlockCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) - state, stateDB, privVals := makeState(1, 1) + state, stateDB, privVals := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( @@ -147,6 +145,7 @@ func TestValidateBlockCommit(t *testing.T) { */ // should be height-1 instead of height wrongHeightVote, err := testfactory.MakeVote( + ctx, privVals[proposerAddr.String()], chainID, 1, @@ -156,14 +155,15 @@ func 
TestValidateBlockCommit(t *testing.T) { state.LastBlockID, time.Now(), ) - require.NoError(t, err, "height %d", height) + require.NoError(t, err) wrongHeightCommit := types.NewCommit( wrongHeightVote.Height, wrongHeightVote.Round, state.LastBlockID, []types.CommitSig{wrongHeightVote.CommitSig()}, ) - block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), nil, nil, nil, wrongHeightCommit, proposerAddr) + block, err := statefactory.MakeBlock(state, height, wrongHeightCommit) + require.NoError(t, err) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) @@ -171,7 +171,8 @@ func TestValidateBlockCommit(t *testing.T) { /* #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() */ - block, _ = state.MakeBlock(height, testfactory.MakeTenTxs(height), nil, nil, nil, wrongSigsCommit, proposerAddr) + block, err = statefactory.MakeBlock(state, height, wrongSigsCommit) + require.NoError(t, err) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) require.True(t, isErrInvalidCommitSignatures, @@ -184,9 +185,10 @@ func TestValidateBlockCommit(t *testing.T) { /* A good block passes */ - var err error var blockID types.BlockID - state, blockID, lastCommit, err = makeAndCommitGoodBlock( + state, blockID, lastCommit = makeAndCommitGoodBlock( + ctx, + t, state, height, lastCommit, @@ -195,12 +197,12 @@ func TestValidateBlockCommit(t *testing.T) { privVals, nil, ) - require.NoError(t, err, "height %d", height) /* wrongSigsCommit is fine except for the extra bad precommit */ goodVote, err := testfactory.MakeVote( + ctx, privVals[proposerAddr.String()], chainID, 1, @@ -210,9 +212,8 @@ func TestValidateBlockCommit(t *testing.T) { blockID, time.Now(), ) - require.NoError(t, err, "height %d", height) - - bpvPubKey, err := 
badPrivVal.GetPubKey(context.Background()) + require.NoError(t, err) + bpvPubKey, err := badPrivVal.GetPubKey(ctx) require.NoError(t, err) badVote := &types.Vote{ @@ -228,9 +229,9 @@ func TestValidateBlockCommit(t *testing.T) { g := goodVote.ToProto() b := badVote.ToProto() - err = badPrivVal.SignVote(context.Background(), chainID, g) + err = badPrivVal.SignVote(ctx, chainID, g) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote(context.Background(), chainID, b) + err = badPrivVal.SignVote(ctx, chainID, b) require.NoError(t, err, "height %d", height) goodVote.Signature, badVote.Signature = g.Signature, b.Signature @@ -241,11 +242,13 @@ func TestValidateBlockCommit(t *testing.T) { } func TestValidateBlockEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, proxyApp.Start(ctx)) - state, stateDB, privVals := makeState(4, 1) + state, stateDB, privVals := makeState(t, 4, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) @@ -278,13 +281,15 @@ func TestValidateBlockEvidence(t *testing.T) { var currentBytes int64 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { - newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), + newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time.Now(), privVals[proposerAddr.String()], chainID) + require.NoError(t, err) evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), evidence, nil, nil, lastCommit, proposerAddr) - err := blockExec.ValidateBlock(state, block) + block, _, err := state.MakeBlock(height, testfactory.MakeTenTxs(height), evidence, 
nil, nil, lastCommit, proposerAddr) + assert.NoError(t, err) + err = blockExec.ValidateBlock(state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d but got %v", height, err) @@ -298,8 +303,9 @@ func TestValidateBlockEvidence(t *testing.T) { var currentBytes int64 // precisely the amount of allowed evidence for { - newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, + newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, defaultEvidenceTime, privVals[proposerAddr.String()], chainID) + require.NoError(t, err) currentBytes += int64(len(newEv.Bytes())) if currentBytes >= maxBytesEvidence { break @@ -307,8 +313,9 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) } - var err error - state, _, lastCommit, err = makeAndCommitGoodBlock( + state, _, lastCommit = makeAndCommitGoodBlock( + ctx, + t, state, height, lastCommit, @@ -317,6 +324,6 @@ func TestValidateBlockEvidence(t *testing.T) { privVals, evidence, ) - require.NoError(t, err, "height %d", height) + } } diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index ad28efac94..364a7f5b29 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -1,6 +1,7 @@ package statesync import ( + "context" "math/rand" "sync" "testing" @@ -22,6 +23,9 @@ var ( ) func TestBlockQueueBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) @@ -35,7 +39,7 @@ func TestBlockQueueBasic(t *testing.T) { for { select { case height := <-queue.nextHeight(): - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-queue.done(): wg.Done() return @@ -69,6 +73,9 @@ loop: // 
Test with spurious failures and retries func TestBlockQueueWithFailures(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) @@ -85,7 +92,7 @@ func TestBlockQueueWithFailures(t *testing.T) { if rand.Intn(failureRate) == 0 { queue.retry(height) } else { - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) } case <-queue.done(): wg.Done() @@ -125,6 +132,9 @@ func TestBlockQueueBlocks(t *testing.T) { expectedHeight := startHeight retryHeight := stopHeight + 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + loop: for { select { @@ -132,7 +142,7 @@ loop: require.Equal(t, height, expectedHeight) require.GreaterOrEqual(t, height, stopHeight) expectedHeight-- - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-time.After(1 * time.Second): if expectedHeight >= stopHeight { t.Fatalf("expected next height %d", expectedHeight) @@ -171,12 +181,15 @@ func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) { queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) defer queue.close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + loop: for { select { case height := <-queue.nextHeight(): require.GreaterOrEqual(t, height, stopHeight) - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-time.After(1 * time.Second): break loop } @@ -184,7 +197,7 @@ loop: require.Len(t, queue.pending, int(startHeight-stopHeight)+1) - queue.add(mockLBResp(t, peerID, stopHeight-1, endTime)) + queue.add(mockLBResp(ctx, t, peerID, stopHeight-1, endTime)) require.Len(t, queue.pending, int(startHeight-stopHeight)+1) } @@ -197,6 +210,9 @@ func TestBlockQueueStopTime(t *testing.T) { queue := newBlockQueue(startHeight, stopHeight, 
1, stopTime, 1) wg := &sync.WaitGroup{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + baseTime := stopTime.Add(-50 * time.Second) // asynchronously fetch blocks and add it to the queue @@ -207,7 +223,7 @@ func TestBlockQueueStopTime(t *testing.T) { select { case height := <-queue.nextHeight(): blockTime := baseTime.Add(time.Duration(height) * time.Second) - queue.add(mockLBResp(t, peerID, height, blockTime)) + queue.add(mockLBResp(ctx, t, peerID, height, blockTime)) case <-queue.done(): wg.Done() return @@ -241,6 +257,9 @@ func TestBlockQueueInitialHeight(t *testing.T) { queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, 1) wg := &sync.WaitGroup{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // asynchronously fetch blocks and add it to the queue for i := 0; i <= numWorkers; i++ { wg.Add(1) @@ -249,7 +268,7 @@ func TestBlockQueueInitialHeight(t *testing.T) { select { case height := <-queue.nextHeight(): require.GreaterOrEqual(t, height, initialHeight) - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-queue.done(): wg.Done() return @@ -273,9 +292,10 @@ loop: } } -func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse { - vals, pv := factory.RandValidatorSet(3, 10) - _, _, lb := mockLB(t, height, time, factory.MakeBlockID(), vals, pv) +func mockLBResp(ctx context.Context, t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse { + t.Helper() + vals, pv := factory.ValidatorSet(ctx, t, 3, 10) + _, _, lb := mockLB(ctx, t, height, time, factory.MakeBlockID(), vals, pv) return lightBlockResponse{ block: lb, peer: peer, diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 84b6971b8b..6f63876372 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -3,13 +3,12 @@ package statesync import ( "errors" "fmt" - 
"io/ioutil" "os" "path/filepath" "strconv" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -29,7 +28,7 @@ type chunk struct { // iterator over all chunks, but callers can request chunks to be retried, optionally after // refetching. type chunkQueue struct { - tmsync.Mutex + sync.Mutex snapshot *snapshot // if this is nil, the queue has been closed dir string // temp dir for on-disk chunk storage chunkFiles map[uint32]string // path to temporary chunk file @@ -42,7 +41,7 @@ type chunkQueue struct { // newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. // Callers must call Close() when done. func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { - dir, err := ioutil.TempDir(tempDir, "tm-statesync") + dir, err := os.MkdirTemp(tempDir, "tm-statesync") if err != nil { return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) } @@ -87,7 +86,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { } path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := ioutil.WriteFile(path, chunk.Chunk, 0600) + err := os.WriteFile(path, chunk.Chunk, 0600) if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } @@ -229,7 +228,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { return nil, nil } - body, err := ioutil.ReadFile(path) + body, err := os.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) } diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index e17c170bd9..c3604df9d5 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -1,7 +1,6 @@ package statesync import ( - "io/ioutil" "os" "testing" @@ -36,20 +35,20 @@ func TestNewChunkQueue_TempDir(t *testing.T) { Hash: []byte{7}, Metadata: nil, } - dir, err := 
ioutil.TempDir("", "newchunkqueue") + dir, err := os.MkdirTemp("", "newchunkqueue") require.NoError(t, err) defer os.RemoveAll(dir) queue, err := newChunkQueue(snapshot, dir) require.NoError(t, err) - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 1) err = queue.Close() require.NoError(t, err) - files, err = ioutil.ReadDir(dir) + files, err = os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 0) } diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 844cb5e323..9cdb349784 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -26,18 +26,16 @@ var ( // NOTE: It is not the responsibility of the dispatcher to verify the light blocks. type Dispatcher struct { // the channel with which to send light block requests on - requestCh chan<- p2p.Envelope - closeCh chan struct{} + requestCh *p2p.Channel mtx sync.Mutex // all pending calls that have been dispatched and are awaiting an answer calls map[types.NodeID]chan *types.LightBlock } -func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { +func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher { return &Dispatcher{ - requestCh: requestCh, - closeCh: make(chan struct{}), + requestCh: requestChannel, calls: make(map[types.NodeID]chan *types.LightBlock), } } @@ -47,7 +45,7 @@ func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { // LightBlock response is used to signal that the peer doesn't have the requested LightBlock. 
func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) { // dispatch the request to the peer - callCh, err := d.dispatch(peer, height) + callCh, err := d.dispatch(ctx, peer, height) if err != nil { return nil, err } @@ -69,19 +67,16 @@ func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.No case <-ctx.Done(): return nil, ctx.Err() - - case <-d.closeCh: - return nil, errDisconnected } } // dispatch takes a peer and allocates it a channel so long as it's not already // busy and the receiving channel is still running. It then dispatches the message -func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) { +func (d *Dispatcher) dispatch(ctx context.Context, peer types.NodeID, height int64) (chan *types.LightBlock, error) { d.mtx.Lock() defer d.mtx.Unlock() select { - case <-d.closeCh: + case <-ctx.Done(): return nil, errDisconnected default: } @@ -96,11 +91,14 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh d.calls[peer] = ch // send request - d.requestCh <- p2p.Envelope{ + if err := d.requestCh.Send(ctx, p2p.Envelope{ To: peer, Message: &ssproto.LightBlockRequest{ Height: uint64(height), }, + }); err != nil { + close(ch) + return ch, err } return ch, nil @@ -109,7 +107,7 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh // Respond allows the underlying process which receives requests on the // requestCh to respond with the respective light block. A nil response is used to // represent that the receiver of the request does not have a light block at that height. 
-func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { +func (d *Dispatcher) Respond(ctx context.Context, lb *tmproto.LightBlock, peer types.NodeID) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -123,8 +121,12 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { // If lb is nil we take that to mean that the peer didn't have the requested light // block and thus pass on the nil to the caller. if lb == nil { - answerCh <- nil - return nil + select { + case answerCh <- nil: + return nil + case <-ctx.Done(): + return ctx.Err() + } } block, err := types.LightBlockFromProto(lb) @@ -132,8 +134,12 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { return err } - answerCh <- block - return nil + select { + case <-ctx.Done(): + return ctx.Err() + case answerCh <- block: + return nil + } } // Close shuts down the dispatcher and cancels any pending calls awaiting responses. @@ -141,17 +147,14 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { func (d *Dispatcher) Close() { d.mtx.Lock() defer d.mtx.Unlock() - close(d.closeCh) - for peer, call := range d.calls { + for peer := range d.calls { delete(d.calls, peer) - close(call) + // don't close the channel here as it's closed in + // other handlers, and would otherwise get garbage + // collected. 
} } -func (d *Dispatcher) Done() <-chan struct{} { - return d.closeCh -} - //---------------------------------------------------------------- // BlockProvider is a p2p based light provider which uses a dispatcher connected @@ -192,7 +195,7 @@ func (p *BlockProvider) LightBlock(ctx context.Context, height int64) (*types.Li case errPeerAlreadyBusy: return nil, provider.ErrLightBlockNotFound default: - return nil, provider.ErrUnreliableProvider{Reason: err.Error()} + return nil, provider.ErrUnreliableProvider{Reason: err} } // check that the height requested is the same one returned @@ -221,6 +224,9 @@ func (p *BlockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) e // String implements stringer interface func (p *BlockProvider) String() string { return string(p.peer) } +// Returns the ID address of the provider (NodeID of peer) +func (p *BlockProvider) ID() string { return string(p.peer) } + //---------------------------------------------------------------- // peerList is a rolling list of peers. 
This is used to distribute the load of diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index e5a6a85cd3..65c517be43 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -18,16 +18,32 @@ import ( "github.com/tendermint/tendermint/types" ) +type channelInternal struct { + In chan p2p.Envelope + Out chan p2p.Envelope + Error chan p2p.PeerError +} + +func testChannel(size int) (*channelInternal, *p2p.Channel) { + in := &channelInternal{ + In: make(chan p2p.Envelope, size), + Out: make(chan p2p.Envelope, size), + Error: make(chan p2p.PeerError, size), + } + return in, p2p.NewChannel(0, nil, in.In, in.Out, in.Error) +} + func TestDispatcherBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) const numPeers = 5 - ch := make(chan p2p.Envelope, 100) - closeCh := make(chan struct{}) - defer close(closeCh) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) d := NewDispatcher(ch) - go handleRequests(t, d, ch, closeCh) + go handleRequests(ctx, t, d, chans.Out) peers := createPeerSet(numPeers) wg := sync.WaitGroup{} @@ -38,7 +54,7 @@ func TestDispatcherBasic(t *testing.T) { wg.Add(1) go func(height int64) { defer wg.Done() - lb, err := d.LightBlock(context.Background(), height, peers[height-1]) + lb, err := d.LightBlock(ctx, height, peers[height-1]) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, lb.Height, height) @@ -52,31 +68,40 @@ func TestDispatcherBasic(t *testing.T) { func TestDispatcherReturnsNoBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) + d := NewDispatcher(ch) - doneCh := make(chan struct{}) - peer := factory.NodeID("a") + + peer := factory.NodeID(t, "a") go func() { - <-ch - require.NoError(t, d.Respond(nil, peer)) - close(doneCh) + <-chans.Out + 
require.NoError(t, d.Respond(ctx, nil, peer)) + cancel() }() - lb, err := d.LightBlock(context.Background(), 1, peer) - <-doneCh + lb, err := d.LightBlock(ctx, 1, peer) + <-ctx.Done() require.Nil(t, lb) - require.Nil(t, err) + require.NoError(t, err) } func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, ch := testChannel(100) d := NewDispatcher(ch) - peer := factory.NodeID("a") + peer := factory.NodeID(t, "a") - ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancelFunc := context.WithTimeout(ctx, 10*time.Millisecond) defer cancelFunc() lb, err := d.LightBlock(ctx, 1, peer) @@ -89,13 +114,15 @@ func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { func TestDispatcherProviders(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) chainID := "test-chain" - closeCh := make(chan struct{}) - defer close(closeCh) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) d := NewDispatcher(ch) - go handleRequests(t, d, ch, closeCh) + go handleRequests(ctx, t, d, chans.Out) peers := createPeerSet(5) providers := make([]*BlockProvider, len(peers)) @@ -106,7 +133,7 @@ func TestDispatcherProviders(t *testing.T) { for i, p := range providers { assert.Equal(t, string(peers[i]), p.String(), i) - lb, err := p.LightBlock(context.Background(), 10) + lb, err := p.LightBlock(ctx, 10) assert.NoError(t, err) assert.NotNil(t, lb) } @@ -114,6 +141,10 @@ func TestDispatcherProviders(t *testing.T) { func TestPeerListBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() assert.Zero(t, peerList.Len()) numPeers := 10 @@ -176,7 +207,10 @@ func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) { 
peerList := newPeerList() require.Zero(t, peerList.Len()) doneCh := make(chan struct{}) - ctx := context.Background() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wrapped, cancel := context.WithCancel(ctx) go func() { peerList.Pop(wrapped) @@ -199,6 +233,9 @@ func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) { func TestPeerListConcurrent(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() numPeers := 10 @@ -229,7 +266,6 @@ func TestPeerListConcurrent(t *testing.T) { // we use a context with cancel and a separate go routine to wait for all // the other goroutines to close. - ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { @@ -264,17 +300,17 @@ func TestPeerListRemove(t *testing.T) { // handleRequests is a helper function usually run in a separate go routine to // imitate the expected responses of the reactor wired to the dispatcher -func handleRequests(t *testing.T, d *Dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) { +func handleRequests(ctx context.Context, t *testing.T, d *Dispatcher, ch chan p2p.Envelope) { t.Helper() for { select { case request := <-ch: height := request.Message.(*ssproto.LightBlockRequest).Height peer := request.To - resp := mockLBResp(t, peer, int64(height), time.Now()) + resp := mockLBResp(ctx, t, peer, int64(height), time.Now()) block, _ := resp.block.ToProto() - require.NoError(t, d.Respond(block, resp.peer)) - case <-closeCh: + require.NoError(t, d.Respond(ctx, block, resp.peer)) + case <-ctx.Done(): return } } diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 939fb409ce..b1e286ad87 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -8,11 +8,12 @@ import ( "reflect" "runtime/debug" "sort" + "sync" "time" abci "github.com/tendermint/tendermint/abci/types" 
"github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -71,9 +72,9 @@ const ( maxLightBlockRequestRetries = 20 ) -func GetChannelDescriptors() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { +func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor { + return map[p2p.ChannelID]*p2p.ChannelDescriptor{ + SnapshotChannel: { ID: SnapshotChannel, MessageType: new(ssproto.Message), @@ -82,7 +83,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor { RecvMessageCapacity: snapshotMsgSize, RecvBufferCapacity: 128, }, - { + ChunkChannel: { ID: ChunkChannel, Priority: 3, MessageType: new(ssproto.Message), @@ -90,7 +91,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor { RecvMessageCapacity: chunkMsgSize, RecvBufferCapacity: 128, }, - { + LightBlockChannel: { ID: LightBlockChannel, MessageType: new(ssproto.Message), Priority: 5, @@ -98,7 +99,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor { RecvMessageCapacity: lightBlockMsgSize, RecvBufferCapacity: 128, }, - { + ParamsChannel: { ID: ParamsChannel, MessageType: new(ssproto.Message), Priority: 2, @@ -126,6 +127,7 @@ type Metricer interface { // serving snapshots for other nodes. type Reactor struct { service.BaseService + logger log.Logger chainID string initialHeight int64 @@ -141,7 +143,6 @@ type Reactor struct { blockCh *p2p.Channel paramsCh *p2p.Channel peerUpdates *p2p.PeerUpdates - closeCh chan struct{} // Dispatcher is used to multiplex light block requests and responses over multiple // peers used by the p2p state provider and in reverse sync. @@ -151,11 +152,12 @@ type Reactor struct { // These will only be set when a state sync is in progress. 
It is used to feed // received snapshots and chunks into the syncer and manage incoming and outgoing // providers. - mtx tmsync.RWMutex + mtx sync.RWMutex syncer *syncer providers map[types.NodeID]*BlockProvider stateProvider StateProvider + eventBus *eventbus.EventBus metrics *Metrics backfillBlockTotal int64 backfilledBlocks int64 @@ -166,20 +168,43 @@ type Reactor struct { // and querying, references to p2p Channels and a channel to listen for peer // updates on. Note, the reactor will close all p2p Channels when stopping. func NewReactor( + ctx context.Context, chainID string, initialHeight int64, cfg config.StateSyncConfig, logger log.Logger, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, - snapshotCh, chunkCh, blockCh, paramsCh *p2p.Channel, + channelCreator p2p.ChannelCreator, peerUpdates *p2p.PeerUpdates, stateStore sm.Store, blockStore *store.BlockStore, tempDir string, ssMetrics *Metrics, -) *Reactor { + eventBus *eventbus.EventBus, +) (*Reactor, error) { + + chDesc := getChannelDescriptors() + + snapshotCh, err := channelCreator(ctx, chDesc[SnapshotChannel]) + if err != nil { + return nil, err + } + chunkCh, err := channelCreator(ctx, chDesc[ChunkChannel]) + if err != nil { + return nil, err + } + blockCh, err := channelCreator(ctx, chDesc[LightBlockChannel]) + if err != nil { + return nil, err + } + paramsCh, err := channelCreator(ctx, chDesc[ParamsChannel]) + if err != nil { + return nil, err + } + r := &Reactor{ + logger: logger, chainID: chainID, initialHeight: initialHeight, cfg: cfg, @@ -190,18 +215,18 @@ func NewReactor( blockCh: blockCh, paramsCh: paramsCh, peerUpdates: peerUpdates, - closeCh: make(chan struct{}), tempDir: tempDir, stateStore: stateStore, blockStore: blockStore, peers: newPeerList(), - dispatcher: NewDispatcher(blockCh.Out), + dispatcher: NewDispatcher(blockCh), providers: make(map[types.NodeID]*BlockProvider), metrics: ssMetrics, + eventBus: eventBus, } r.BaseService = *service.NewBaseService(logger, "StateSync", r) - 
return r + return r, nil } // OnStart starts separate go routines for each p2p Channel and listens for @@ -210,16 +235,12 @@ func NewReactor( // handle individual envelopes as to not have to deal with bounding workers or pools. // The caller must be sure to execute OnStop to ensure the outbound p2p Channels are // closed. No error is returned. -func (r *Reactor) OnStart() error { - go r.processSnapshotCh() - - go r.processChunkCh() - - go r.processBlockCh() - - go r.processParamsCh() - - go r.processPeerUpdates() +func (r *Reactor) OnStart(ctx context.Context) error { + go r.processCh(ctx, r.snapshotCh, "snapshot") + go r.processCh(ctx, r.chunkCh, "chunk") + go r.processCh(ctx, r.blockCh, "light block") + go r.processCh(ctx, r.paramsCh, "consensus params") + go r.processPeerUpdates(ctx) return nil } @@ -229,21 +250,14 @@ func (r *Reactor) OnStart() error { func (r *Reactor) OnStop() { // tell the dispatcher to stop sending any more requests r.dispatcher.Close() - // wait for any remaining requests to complete - <-r.dispatcher.Done() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. 
- <-r.peerUpdates.Done() - <-r.snapshotCh.Done() - <-r.chunkCh.Done() - <-r.blockCh.Done() - <-r.paramsCh.Done() +} + +func (r *Reactor) PublishStatus(ctx context.Context, event types.EventDataStateSyncStatus) error { + if r.eventBus == nil { + return errors.New("event system is not configured") + } + + return r.eventBus.PublishEventStateSyncStatus(ctx, event) } // Sync runs a state sync, fetching snapshots and providing chunks to the @@ -271,13 +285,12 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.syncer = newSyncer( r.cfg, - r.Logger, + r.logger, r.conn, r.connQuery, r.stateProvider, - r.snapshotCh.Out, - r.chunkCh.Out, - r.snapshotCh.Done(), + r.snapshotCh, + r.chunkCh, r.tempDir, r.metrics, ) @@ -290,18 +303,12 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.mtx.Unlock() }() - requestSnapshotsHook := func() { + requestSnapshotsHook := func() error { // request snapshots from all currently connected peers - msg := p2p.Envelope{ + return r.snapshotCh.Send(ctx, p2p.Envelope{ Broadcast: true, Message: &ssproto.SnapshotsRequest{}, - } - - select { - case <-ctx.Done(): - case <-r.closeCh: - case r.snapshotCh.Out <- msg: - } + }) } state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook) @@ -321,7 +328,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { err = r.Backfill(ctx, state) if err != nil { - r.Logger.Error("backfill failed. Proceeding optimistically...", "err", err) + r.logger.Error("backfill failed. 
Proceeding optimistically...", "err", err) } return state, nil @@ -359,7 +366,7 @@ func (r *Reactor) backfill( trustedBlockID types.BlockID, stopTime time.Time, ) error { - r.Logger.Info("starting backfill process...", "startHeight", startHeight, + r.logger.Info("starting backfill process...", "startHeight", startHeight, "stopHeight", stopHeight, "stopTime", stopTime, "trustedBlockID", trustedBlockID) r.backfillBlockTotal = startHeight - stopHeight + 1 @@ -384,10 +391,12 @@ func (r *Reactor) backfill( go func() { for { select { + case <-ctx.Done(): + return case height := <-queue.nextHeight(): // pop the next peer of the list to send a request to peer := r.peers.Pop(ctx) - r.Logger.Debug("fetching next block", "height", height, "peer", peer) + r.logger.Debug("fetching next block", "height", height, "peer", peer) subCtx, cancel := context.WithTimeout(ctxWithCancel, lightBlockResponseTimeout) defer cancel() lb, err := func() (*types.LightBlock, error) { @@ -403,18 +412,18 @@ func (r *Reactor) backfill( if err != nil { queue.retry(height) if errors.Is(err, errNoConnectedPeers) { - r.Logger.Info("backfill: no connected peers to fetch light blocks from; sleeping...", + r.logger.Info("backfill: no connected peers to fetch light blocks from; sleeping...", "sleepTime", sleepTime) time.Sleep(sleepTime) } else { // we don't punish the peer as it might just have not responded in time - r.Logger.Info("backfill: error with fetching light block", + r.logger.Info("backfill: error with fetching light block", "height", height, "err", err) } continue } if lb == nil { - r.Logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height) + r.logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height) queue.retry(height) // As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't // have any prior ones, thus we remove it from the peer list. 
@@ -426,12 +435,14 @@ func (r *Reactor) backfill( // hashes line up err = lb.ValidateBasic(chainID) if err != nil || lb.Height != height { - r.Logger.Info("backfill: fetched light block failed validate basic, removing peer...", + r.logger.Info("backfill: fetched light block failed validate basic, removing peer...", "err", err, "height", height) queue.retry(height) - r.blockCh.Error <- p2p.PeerError{ + if serr := r.blockCh.SendError(ctx, p2p.PeerError{ NodeID: peer, Err: fmt.Errorf("received invalid light block: %w", err), + }); serr != nil { + return } continue } @@ -441,7 +452,7 @@ func (r *Reactor) backfill( block: lb, peer: peer, }) - r.Logger.Debug("backfill: added light block to processing queue", "height", height) + r.logger.Debug("backfill: added light block to processing queue", "height", height) case <-queue.done(): return @@ -453,9 +464,6 @@ func (r *Reactor) backfill( // verify all light blocks for { select { - case <-r.closeCh: - queue.close() - return nil case <-ctx.Done(): queue.close() return nil @@ -465,27 +473,27 @@ func (r *Reactor) backfill( // we equate to. ValidatorsHash and CommitHash have already been // checked in the `ValidateBasic` if w, g := trustedBlockID.Hash, resp.block.Hash(); !bytes.Equal(w, g) { - r.Logger.Info("received invalid light block. header hash doesn't match trusted LastBlockID", + r.logger.Info("received invalid light block. header hash doesn't match trusted LastBlockID", "trustedHash", w, "receivedHash", g, "height", resp.block.Height) - r.blockCh.Error <- p2p.PeerError{ + if err := r.blockCh.SendError(ctx, p2p.PeerError{ NodeID: resp.peer, Err: fmt.Errorf("received invalid light block. 
Expected hash %v, got: %v", w, g), + }); err != nil { + return nil } queue.retry(resp.block.Height) continue } // save the signed headers - err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID) - if err != nil { + if err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID); err != nil { return err } // check if there has been a change in the validator set if lastValidatorSet != nil && !bytes.Equal(resp.block.Header.ValidatorsHash, resp.block.Header.NextValidatorsHash) { // save all the heights that the last validator set was the same - err = r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet) - if err != nil { + if err := r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet); err != nil { return err } @@ -495,7 +503,7 @@ func (r *Reactor) backfill( trustedBlockID = resp.block.LastBlockID queue.success() - r.Logger.Info("backfill: verified and stored light block", "height", resp.block.Height) + r.logger.Info("backfill: verified and stored light block", "height", resp.block.Height) lastValidatorSet = resp.block.ValidatorSet @@ -519,7 +527,7 @@ func (r *Reactor) backfill( return err } - r.Logger.Info("successfully completed backfill process", "endHeight", queue.terminal.Height) + r.logger.Info("successfully completed backfill process", "endHeight", queue.terminal.Height) return nil } } @@ -528,12 +536,12 @@ func (r *Reactor) backfill( // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
-func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *ssproto.SnapshotsRequest: - snapshots, err := r.recentSnapshots(recentSnapshots) + snapshots, err := r.recentSnapshots(ctx, recentSnapshots) if err != nil { logger.Error("failed to fetch snapshots", "err", err) return nil @@ -546,7 +554,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { "format", snapshot.Format, "peer", envelope.From, ) - r.snapshotCh.Out <- p2p.Envelope{ + + if err := r.snapshotCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, @@ -555,6 +564,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, + }); err != nil { + return err } } @@ -597,23 +608,23 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { // handleChunkMessage handles envelopes sent from peers on the ChunkChannel. // It returns an error only if the Envelope.Message is unknown for this channel. // This should never be called outside of handleMessage. 
-func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: - r.Logger.Debug( + r.logger.Debug( "received chunk request", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "peer", envelope.From, ) - resp, err := r.conn.LoadSnapshotChunkSync(context.Background(), abci.RequestLoadSnapshotChunk{ + resp, err := r.conn.LoadSnapshotChunk(ctx, abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, }) if err != nil { - r.Logger.Error( + r.logger.Error( "failed to load chunk", "height", msg.Height, "format", msg.Format, @@ -624,14 +635,14 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { return nil } - r.Logger.Debug( + r.logger.Debug( "sending chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "peer", envelope.From, ) - r.chunkCh.Out <- p2p.Envelope{ + if err := r.chunkCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ChunkResponse{ Height: msg.Height, @@ -640,6 +651,8 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { Chunk: resp.Chunk, Missing: resp.Chunk == nil, }, + }); err != nil { + return err } case *ssproto.ChunkResponse: @@ -647,11 +660,11 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { defer r.mtx.RUnlock() if r.syncer == nil { - r.Logger.Debug("received unexpected chunk; no state sync in progress", "peer", envelope.From) + r.logger.Debug("received unexpected chunk; no state sync in progress", "peer", envelope.From) return nil } - r.Logger.Debug( + r.logger.Debug( "received chunk; adding to sync", "height", msg.Height, "format", msg.Format, @@ -666,7 +679,7 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { Sender: envelope.From, }) if err != nil { - r.Logger.Error( + r.logger.Error( "failed to add chunk", "height", msg.Height, 
"format", msg.Format, @@ -684,48 +697,54 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { return nil } -func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.LightBlockRequest: - r.Logger.Info("received light block request", "height", msg.Height) + r.logger.Info("received light block request", "height", msg.Height) lb, err := r.fetchLightBlock(msg.Height) if err != nil { - r.Logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) + r.logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) return err } if lb == nil { - r.blockCh.Out <- p2p.Envelope{ + if err := r.blockCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.LightBlockResponse{ LightBlock: nil, }, + }); err != nil { + return err } return nil } lbproto, err := lb.ToProto() if err != nil { - r.Logger.Error("marshaling light block to proto", "err", err) + r.logger.Error("marshaling light block to proto", "err", err) return nil } // NOTE: If we don't have the light block we will send a nil light block // back to the requested node, indicating that we don't have it. 
- r.blockCh.Out <- p2p.Envelope{ + if err := r.blockCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.LightBlockResponse{ LightBlock: lbproto, }, + }); err != nil { + return err } - case *ssproto.LightBlockResponse: var height int64 if msg.LightBlock != nil { height = msg.LightBlock.SignedHeader.Header.Height } - r.Logger.Info("received light block response", "peer", envelope.From, "height", height) - if err := r.dispatcher.Respond(msg.LightBlock, envelope.From); err != nil { - r.Logger.Error("error processing light block response", "err", err, "height", height) + r.logger.Info("received light block response", "peer", envelope.From, "height", height) + if err := r.dispatcher.Respond(ctx, msg.LightBlock, envelope.From); err != nil { + if errors.Is(err, context.Canceled) { + return err + } + r.logger.Error("error processing light block response", "err", err, "height", height) } default: @@ -735,29 +754,30 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { return nil } -func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope) error { switch msg := envelope.Message.(type) { case *ssproto.ParamsRequest: - r.Logger.Debug("received consensus params request", "height", msg.Height) + r.logger.Debug("received consensus params request", "height", msg.Height) cp, err := r.stateStore.LoadConsensusParams(int64(msg.Height)) if err != nil { - r.Logger.Error("failed to fetch requested consensus params", "err", err, "height", msg.Height) + r.logger.Error("failed to fetch requested consensus params", "err", err, "height", msg.Height) return nil } cpproto := cp.ToProto() - r.paramsCh.Out <- p2p.Envelope{ + if err := r.paramsCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ParamsResponse{ Height: msg.Height, ConsensusParams: cpproto, }, + }); err != nil { + return err } - case *ssproto.ParamsResponse: r.mtx.RLock() defer r.mtx.RUnlock() - 
r.Logger.Debug("received consensus params response", "height", msg.Height) + r.logger.Debug("received consensus params response", "height", msg.Height) cp := types.ConsensusParamsFromProto(msg.ConsensusParams) @@ -768,7 +788,7 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { return errors.New("failed to send consensus params, stateprovider not ready for response") } } else { - r.Logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From) + r.logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From) } default: @@ -781,11 +801,11 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, chID p2p.ChannelID, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -793,21 +813,17 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - r.Logger.Debug("received message", "message", reflect.TypeOf(envelope.Message), "peer", envelope.From) + r.logger.Debug("received message", "message", reflect.TypeOf(envelope.Message), "peer", envelope.From) switch chID { case SnapshotChannel: - err = r.handleSnapshotMessage(envelope) - + err = r.handleSnapshotMessage(ctx, envelope) case ChunkChannel: - err = r.handleChunkMessage(envelope) - + err = r.handleChunkMessage(ctx, envelope) case LightBlockChannel: - err = r.handleLightBlockMessage(envelope) - + err = 
r.handleLightBlockMessage(ctx, envelope) case ParamsChannel: - err = r.handleParamsMessage(envelope) - + err = r.handleParamsMessage(ctx, envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) } @@ -815,58 +831,34 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err return err } -// processSnapshotCh initiates a blocking process where we listen for and handle -// envelopes on the SnapshotChannel. -func (r *Reactor) processSnapshotCh() { - r.processCh(r.snapshotCh, "snapshot") -} - -// processChunkCh initiates a blocking process where we listen for and handle -// envelopes on the ChunkChannel. -func (r *Reactor) processChunkCh() { - r.processCh(r.chunkCh, "chunk") -} - -// processBlockCh initiates a blocking process where we listen for and handle -// envelopes on the LightBlockChannel. -func (r *Reactor) processBlockCh() { - r.processCh(r.blockCh, "light block") -} - -func (r *Reactor) processParamsCh() { - r.processCh(r.paramsCh, "consensus params") -} - // processCh routes state sync messages to their respective handlers. Any error // encountered during message execution will result in a PeerError being sent on // the respective channel. When the reactor is stopped, we will catch the signal // and close the p2p Channel gracefully. 
-func (r *Reactor) processCh(ch *p2p.Channel, chName string) { - defer ch.Close() - - for { - select { - case envelope := <-ch.In: - if err := r.handleMessage(ch.ID, envelope); err != nil { - r.Logger.Error(fmt.Sprintf("failed to process %s message", chName), - "ch_id", ch.ID, "envelope", envelope, "err", err) - ch.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processCh(ctx context.Context, ch *p2p.Channel, chName string) { + iter := ch.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, ch.ID, envelope); err != nil { + r.logger.Error("failed to process message", + "err", err, + "channel", chName, + "ch_id", ch.ID, + "envelope", envelope) + if serr := ch.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug(fmt.Sprintf("stopped listening on %s channel; closing...", chName)) - return } } } // processPeerUpdate processes a PeerUpdate, returning an error upon failing to // handle the PeerUpdate or if a panic is recovered. 
-func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) { + r.logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) switch peerUpdate.Status { case p2p.PeerStatusUp: @@ -885,9 +877,9 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher) r.providers[peerUpdate.NodeID] = newProvider - err := r.syncer.AddPeer(peerUpdate.NodeID) + err := r.syncer.AddPeer(ctx, peerUpdate.NodeID) if err != nil { - r.Logger.Error("error adding peer to syncer", "error", err) + r.logger.Error("error adding peer to syncer", "error", err) return } if sp, ok := r.stateProvider.(*stateProviderP2P); ok { @@ -900,30 +892,26 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { delete(r.providers, peerUpdate.NodeID) r.syncer.RemovePeer(peerUpdate.NodeID) } - r.Logger.Info("processed peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) + r.logger.Info("processed peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) } // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. 
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-r.peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate) } } } // recentSnapshots fetches the n most recent snapshots from the app -func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { - resp, err := r.conn.ListSnapshotsSync(context.Background(), abci.RequestListSnapshots{}) +func (r *Reactor) recentSnapshots(ctx context.Context, n uint32) ([]*snapshot, error) { + resp, err := r.conn.ListSnapshots(ctx, abci.RequestListSnapshots{}) if err != nil { return nil, err } @@ -1005,13 +993,10 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { case <-ctx.Done(): return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]", time.Since(startAt).Seconds(), r.peers.Len(), numPeers) - case <-r.closeCh: - return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]", - time.Since(startAt).Seconds(), r.peers.Len(), numPeers) case <-t.C: continue case <-logT.C: - r.Logger.Info("waiting for sufficient peers to start statesync", + r.logger.Info("waiting for sufficient peers to start statesync", "duration", time.Since(startAt).String(), "target", numPeers, "peers", r.peers.Len(), @@ -1030,7 +1015,7 @@ func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initial Height: r.cfg.TrustHeight, Hash: r.cfg.TrustHashBytes(), } - spLogger := r.Logger.With("module", "stateprovider") + spLogger := r.logger.With("module", "stateprovider") spLogger.Info("initializing state provider", "trustPeriod", to.Period, "trustHeight", to.Height, "useP2P", r.cfg.UseP2P) @@ -1045,7 +1030,7 @@ func (r *Reactor) initStateProvider(ctx 
context.Context, chainID string, initial providers[idx] = NewBlockProvider(p, chainID, r.dispatcher) } - r.stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, providers, to, r.paramsCh.Out, spLogger) + r.stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, providers, to, r.paramsCh, spLogger) if err != nil { return fmt.Errorf("failed to initialize P2P state provider: %w", err) } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index b90e5fd789..c1ca87b2c5 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -29,9 +29,9 @@ import ( "github.com/tendermint/tendermint/types" ) -var ( - m = PrometheusMetrics(config.TestConfig().Instrumentation.Namespace) -) +var m = PrometheusMetrics(config.TestConfig().Instrumentation.Namespace) + +const testAppVersion = 9 type reactorTestSuite struct { reactor *Reactor @@ -69,6 +69,7 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, conn *proxymocks.AppConnSnapshot, connQuery *proxymocks.AppConnQuery, @@ -145,63 +146,84 @@ func setup( cfg := config.DefaultStateSyncConfig() - rts.reactor = NewReactor( + chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + switch desc.ID { + case SnapshotChannel: + return rts.snapshotChannel, nil + case ChunkChannel: + return rts.chunkChannel, nil + case LightBlockChannel: + return rts.blockChannel, nil + case ParamsChannel: + return rts.paramsChannel, nil + default: + return nil, fmt.Errorf("invalid channel; %v", desc.ID) + } + } + + logger := log.NewNopLogger() + + var err error + rts.reactor, err = NewReactor( + ctx, factory.DefaultTestChainID, 1, *cfg, - log.TestingLogger(), + logger.With("component", "reactor"), conn, connQuery, - rts.snapshotChannel, - rts.chunkChannel, - rts.blockChannel, - rts.paramsChannel, + chCreator, rts.peerUpdates, rts.stateStore, rts.blockStore, "", m, + nil, // eventbus can be nil ) + 
require.NoError(t, err) rts.syncer = newSyncer( *cfg, - log.NewNopLogger(), + logger.With("component", "syncer"), conn, connQuery, stateProvider, - rts.snapshotOutCh, - rts.chunkOutCh, - rts.snapshotChannel.Done(), + rts.snapshotChannel, + rts.chunkChannel, "", rts.reactor.metrics, ) - require.NoError(t, rts.reactor.Start()) + ctx, cancel := context.WithCancel(ctx) + + require.NoError(t, rts.reactor.Start(ctx)) require.True(t, rts.reactor.IsRunning()) - t.Cleanup(func() { - require.NoError(t, rts.reactor.Stop()) - require.False(t, rts.reactor.IsRunning()) - }) + t.Cleanup(cancel) + t.Cleanup(rts.reactor.Wait) + t.Cleanup(leaktest.Check(t)) return rts } func TestReactor_Sync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const snapshotHeight = 7 - rts := setup(t, nil, nil, nil, 2) - chain := buildLightBlockChain(t, 1, 10, time.Now()) + rts := setup(ctx, t, nil, nil, nil, 2) + chain := buildLightBlockChain(ctx, t, 1, 10, time.Now()) // app accepts any snapshot - rts.conn.On("OfferSnapshotSync", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). + rts.conn.On("OfferSnapshot", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) // app accepts every chunk - rts.conn.On("ApplySnapshotChunkSync", ctx, mock.AnythingOfType("types.RequestApplySnapshotChunk")). + rts.conn.On("ApplySnapshotChunk", ctx, mock.AnythingOfType("types.RequestApplySnapshotChunk")). 
Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) // app query returns valid state app hash - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ - AppVersion: 9, + rts.connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ + AppVersion: testAppVersion, LastBlockHeight: snapshotHeight, LastBlockAppHash: chain[snapshotHeight+1].AppHash, }, nil) @@ -213,9 +235,9 @@ func TestReactor_Sync(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - go graduallyAddPeers(rts.peerUpdateCh, closeCh, 1*time.Second) + go graduallyAddPeers(t, rts.peerUpdateCh, closeCh, 1*time.Second) go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ { Height: uint64(snapshotHeight), @@ -226,7 +248,7 @@ func TestReactor_Sync(t *testing.T) { go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) // update the config to use the p2p provider rts.reactor.cfg.UseP2P = true @@ -235,12 +257,15 @@ func TestReactor_Sync(t *testing.T) { rts.reactor.cfg.DiscoveryTime = 1 * time.Second // Run state sync - _, err := rts.reactor.Sync(context.Background()) + _, err := rts.reactor.Sync(ctx) require.NoError(t, err) } func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -282,19 +307,23 @@ func TestReactor_ChunkRequest(t *testing.T) { }, } - for name, tc := range testcases { - tc := tc + bctx, bcancel := 
context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("LoadSnapshotChunkSync", context.Background(), abci.RequestLoadSnapshotChunk{ + conn.On("LoadSnapshotChunk", mock.Anything, abci.RequestLoadSnapshotChunk{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) - rts := setup(t, conn, nil, nil, 2) + rts := setup(ctx, t, conn, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -311,7 +340,10 @@ func TestReactor_ChunkRequest(t *testing.T) { } func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -360,18 +392,23 @@ func TestReactor_SnapshotsRequest(t *testing.T) { }, }, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("ListSnapshotsSync", context.Background(), abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + conn.On("ListSnapshots", mock.Anything, abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ Snapshots: tc.snapshots, }, nil) - rts := setup(t, conn, nil, nil, 100) + rts := setup(ctx, t, conn, nil, nil, 100) rts.snapshotInCh <- p2p.Envelope{ From: types.NodeID("aa"), @@ -379,7 +416,7 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } if len(tc.expectResponses) > 0 { - retryUntil(t, func() bool { return 
len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) + retryUntil(ctx, t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) } responses := make([]*ssproto.SnapshotsResponse, len(tc.expectResponses)) @@ -395,14 +432,18 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } func TestReactor_LightBlockResponse(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) var height int64 = 10 - h := factory.MakeRandomHeader() + // generates a random header + h := factory.MakeHeader(t, &types.Header{}) h.Height = height blockID := factory.MakeBlockIDWithHash(h.Hash()) - vals, pv := factory.RandValidatorSet(1, 10) - vote, err := factory.MakeVote(pv[0], h.ChainID, 0, h.Height, 0, 2, + vals, pv := factory.ValidatorSet(ctx, t, 1, 10) + vote, err := factory.MakeVote(ctx, pv[0], h.ChainID, 0, h.Height, 0, 2, blockID, factory.DefaultTestTime) require.NoError(t, err) @@ -448,7 +489,10 @@ func TestReactor_LightBlockResponse(t *testing.T) { } func TestReactor_BlockProviders(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ NodeID: types.NodeID("aa"), Status: p2p.PeerStatusUp, @@ -461,8 +505,8 @@ func TestReactor_BlockProviders(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - chain := buildLightBlockChain(t, 1, 10, time.Now()) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + chain := buildLightBlockChain(ctx, t, 1, 10, time.Now()) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) peers := rts.reactor.peers.All() require.Len(t, peers, 2) @@ -479,7 +523,7 @@ func TestReactor_BlockProviders(t *testing.T) { go func(t *testing.T, p provider.Provider) { defer wg.Done() for height := 
2; height < 10; height++ { - lb, err := p.LightBlock(context.Background(), int64(height)) + lb, err := p.LightBlock(ctx, int64(height)) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, height, int(lb.Height)) @@ -487,7 +531,6 @@ func TestReactor_BlockProviders(t *testing.T) { }(t, p) } - ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { @@ -501,7 +544,10 @@ func TestReactor_BlockProviders(t *testing.T) { } func TestReactor_StateProviderP2P(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) // make syncer non nil else test won't think we are state syncing rts.reactor.syncer = rts.syncer peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength)) @@ -518,9 +564,9 @@ func TestReactor_StateProviderP2P(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - chain := buildLightBlockChain(t, 1, 10, time.Now()) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + chain := buildLightBlockChain(ctx, t, 1, 10, time.Now()) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) rts.reactor.cfg.UseP2P = true rts.reactor.cfg.TrustHeight = 1 @@ -533,10 +579,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { } require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured") - bctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ictx, cancel := context.WithTimeout(bctx, time.Second) + ictx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() rts.reactor.mtx.Lock() @@ -545,7 +588,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { require.NoError(t, err) rts.reactor.syncer.stateProvider = 
rts.reactor.stateProvider - actx, cancel := context.WithTimeout(bctx, 10*time.Second) + actx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() appHash, err := rts.reactor.stateProvider.AppHash(actx, 5) @@ -569,13 +612,19 @@ func TestReactor_StateProviderP2P(t *testing.T) { } func TestReactor_Backfill(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // test backfill algorithm with varying failure rates [0, 10] failureRates := []int{0, 2, 9} for _, failureRate := range failureRates { failureRate := failureRate t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) - rts := setup(t, nil, nil, nil, 21) + rts := setup(ctx, t, nil, nil, nil, 21) var ( startHeight int64 = 20 @@ -601,15 +650,15 @@ func TestReactor_Backfill(t *testing.T) { return nil }) - chain := buildLightBlockChain(t, stopHeight-1, startHeight+1, stopTime) + chain := buildLightBlockChain(ctx, t, stopHeight-1, startHeight+1, stopTime) closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, failureRate) err := rts.reactor.backfill( - context.Background(), + ctx, factory.DefaultTestChainID, startHeight, stopHeight, @@ -644,8 +693,8 @@ func TestReactor_Backfill(t *testing.T) { // retryUntil will continue to evaluate fn and will return successfully when true // or fail when the timeout is reached. 
-func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) +func retryUntil(ctx context.Context, t *testing.T, fn func() bool, timeout time.Duration) { + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() for { @@ -656,7 +705,9 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { } } -func handleLightBlockRequests(t *testing.T, +func handleLightBlockRequests( + ctx context.Context, + t *testing.T, chain map[int64]*types.LightBlock, receiving chan p2p.Envelope, sending chan p2p.Envelope, @@ -666,6 +717,8 @@ func handleLightBlockRequests(t *testing.T, errorCount := 0 for { select { + case <-ctx.Done(): + return case envelope := <-receiving: if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok { if requests%10 >= failureRate { @@ -680,8 +733,8 @@ func handleLightBlockRequests(t *testing.T, } else { switch errorCount % 3 { case 0: // send a different block - vals, pv := factory.RandValidatorSet(3, 10) - _, _, lb := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv) + vals, pv := factory.ValidatorSet(ctx, t, 3, 10) + _, _, lb := mockLB(ctx, t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv) differntLB, err := lb.ToProto() require.NoError(t, err) sending <- p2p.Envelope{ @@ -709,13 +762,24 @@ func handleLightBlockRequests(t *testing.T, } } -func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Envelope, closeCh chan struct{}) { +func handleConsensusParamsRequest( + ctx context.Context, + t *testing.T, + receiving, sending chan p2p.Envelope, + closeCh chan struct{}, +) { t.Helper() params := types.DefaultConsensusParams() paramsProto := params.ToProto() for { select { + case <-ctx.Done(): + return case envelope := <-receiving: + if ctx.Err() != nil { + return + } + t.Log("received consensus params request") msg, ok := envelope.Message.(*ssproto.ParamsRequest) 
require.True(t, ok) @@ -733,35 +797,38 @@ func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Enve } } -func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*types.LightBlock { +func buildLightBlockChain(ctx context.Context, t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*types.LightBlock { + t.Helper() chain := make(map[int64]*types.LightBlock, toHeight-fromHeight) lastBlockID := factory.MakeBlockID() blockTime := startTime.Add(time.Duration(fromHeight-toHeight) * time.Minute) - vals, pv := factory.RandValidatorSet(3, 10) + vals, pv := factory.ValidatorSet(ctx, t, 3, 10) for height := fromHeight; height < toHeight; height++ { - vals, pv, chain[height] = mockLB(t, height, blockTime, lastBlockID, vals, pv) + vals, pv, chain[height] = mockLB(ctx, t, height, blockTime, lastBlockID, vals, pv) lastBlockID = factory.MakeBlockIDWithHash(chain[height].Header.Hash()) blockTime = blockTime.Add(1 * time.Minute) } return chain } -func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockID, +func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, lastBlockID types.BlockID, currentVals *types.ValidatorSet, currentPrivVals []types.PrivValidator, ) (*types.ValidatorSet, []types.PrivValidator, *types.LightBlock) { - header, err := factory.MakeHeader(&types.Header{ + t.Helper() + header := factory.MakeHeader(t, &types.Header{ Height: height, LastBlockID: lastBlockID, Time: time, }) - require.NoError(t, err) - nextVals, nextPrivVals := factory.RandValidatorSet(3, 10) + header.Version.App = testAppVersion + + nextVals, nextPrivVals := factory.ValidatorSet(ctx, t, 3, 10) header.ValidatorsHash = currentVals.Hash() header.NextValidatorsHash = nextVals.Hash() header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams() lastBlockID = factory.MakeBlockIDWithHash(header.Hash()) voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 
0, tmproto.PrecommitType, currentVals) - commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, currentPrivVals, time) + commit, err := factory.MakeCommit(ctx, lastBlockID, height, 0, voteSet, currentPrivVals, time) require.NoError(t, err) return nextVals, nextPrivVals, &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -775,6 +842,7 @@ func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockI // graduallyAddPeers delivers a new randomly-generated peer update on peerUpdateCh once // per interval, until closeCh is closed. Each peer update is assigned a random node ID. func graduallyAddPeers( + t *testing.T, peerUpdateCh chan p2p.PeerUpdate, closeCh chan struct{}, interval time.Duration, @@ -784,7 +852,7 @@ func graduallyAddPeers( select { case <-ticker.C: peerUpdateCh <- p2p.PeerUpdate{ - NodeID: factory.RandomNodeID(), + NodeID: factory.RandomNodeID(t), Status: p2p.PeerStatusUp, } case <-closeCh: diff --git a/internal/statesync/snapshots.go b/internal/statesync/snapshots.go index a0620e4501..0e3bbb47a8 100644 --- a/internal/statesync/snapshots.go +++ b/internal/statesync/snapshots.go @@ -6,8 +6,8 @@ import ( "math/rand" "sort" "strings" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -41,7 +41,7 @@ func (s *snapshot) Key() snapshotKey { // snapshotPool discovers and aggregates snapshots across peers. 
type snapshotPool struct { - tmsync.Mutex + sync.Mutex snapshots map[snapshotKey]*snapshot snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index b622824cde..dc54ee3e2a 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -6,11 +6,11 @@ import ( "errors" "fmt" "strings" + "sync" "time" dbm "github.com/tendermint/tm-db" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -40,10 +40,11 @@ type StateProvider interface { } type stateProviderRPC struct { - tmsync.Mutex // light.Client is not concurrency-safe + sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 providers map[lightprovider.Provider]string + logger log.Logger } // NewRPCStateProvider creates a new StateProvider using a light client and RPC clients. 
@@ -79,6 +80,7 @@ func NewRPCStateProvider( return nil, err } return &stateProviderRPC{ + logger: logger, lc: lc, initialHeight: initialHeight, providers: providerRemotes, @@ -176,7 +178,7 @@ func (s *stateProviderRPC) State(ctx context.Context, height uint64) (sm.State, if err != nil { return sm.State{}, fmt.Errorf("unable to create RPC client: %w", err) } - rpcclient := lightrpc.NewClient(primaryRPC, s.lc) + rpcclient := lightrpc.NewClient(s.logger, primaryRPC, s.lc) result, err := rpcclient.ConsensusParams(ctx, ¤tLightBlock.Height) if err != nil { return sm.State{}, fmt.Errorf("unable to fetch consensus parameters for height %v: %w", @@ -197,10 +199,10 @@ func rpcClient(server string) (*rpchttp.HTTP, error) { } type stateProviderP2P struct { - tmsync.Mutex // light.Client is not concurrency-safe + sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 - paramsSendCh chan<- p2p.Envelope + paramsSendCh *p2p.Channel paramsRecvCh chan types.ConsensusParams } @@ -212,7 +214,7 @@ func NewP2PStateProvider( initialHeight int64, providers []lightprovider.Provider, trustOptions light.TrustOptions, - paramsSendCh chan<- p2p.Envelope, + paramsSendCh *p2p.Channel, logger log.Logger, ) (StateProvider, error) { if len(providers) < 2 { @@ -382,15 +384,13 @@ func (s *stateProviderP2P) tryGetConsensusParamsFromWitnesses( return nil, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err) } - select { - case s.paramsSendCh <- p2p.Envelope{ + if err := s.paramsSendCh.Send(ctx, p2p.Envelope{ To: peer, Message: &ssproto.ParamsRequest{ Height: uint64(height), }, - }: - case <-ctx.Done(): - return nil, ctx.Err() + }); err != nil { + return nil, err } select { diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index b4212961a3..c22a59c38a 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -5,11 +5,11 @@ import ( "context" "errors" "fmt" + "sync" "time" abci 
"github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -57,20 +57,19 @@ type syncer struct { conn proxy.AppConnSnapshot connQuery proxy.AppConnQuery snapshots *snapshotPool - snapshotCh chan<- p2p.Envelope - chunkCh chan<- p2p.Envelope + snapshotCh *p2p.Channel + chunkCh *p2p.Channel tempDir string fetchers int32 retryTimeout time.Duration - mtx tmsync.RWMutex + mtx sync.RWMutex chunks *chunkQueue metrics *Metrics avgChunkTime int64 lastSyncedSnapshotHeight int64 processingSnapshot *snapshot - closeCh <-chan struct{} } // newSyncer creates a new syncer. @@ -80,9 +79,8 @@ func newSyncer( conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, stateProvider StateProvider, - snapshotCh chan<- p2p.Envelope, - chunkCh chan<- p2p.Envelope, - closeCh <-chan struct{}, + snapshotCh *p2p.Channel, + chunkCh *p2p.Channel, tempDir string, metrics *Metrics, ) *syncer { @@ -98,7 +96,6 @@ func newSyncer( fetchers: cfg.Fetchers, retryTimeout: cfg.ChunkRequestTimeout, metrics: metrics, - closeCh: closeCh, } } @@ -141,29 +138,13 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // AddPeer adds a peer to the pool. For now we just keep it simple and send a // single request to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(peerID types.NodeID) (err error) { - defer func() { - // TODO: remove panic recover once AddPeer can no longer accientally send on - // closed channel. - // This recover was added to protect against the p2p message being sent - // to the snapshot channel after the snapshot channel was closed. 
- if r := recover(); r != nil { - err = fmt.Errorf("panic sending peer snapshot request: %v", r) - } - }() - +func (s *syncer) AddPeer(ctx context.Context, peerID types.NodeID) error { s.logger.Debug("Requesting snapshots from peer", "peer", peerID) - msg := p2p.Envelope{ + return s.snapshotCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &ssproto.SnapshotsRequest{}, - } - - select { - case <-s.closeCh: - case s.snapshotCh <- msg: - } - return err + }) } // RemovePeer removes a peer from the pool. @@ -178,14 +159,16 @@ func (s *syncer) RemovePeer(peerID types.NodeID) { func (s *syncer) SyncAny( ctx context.Context, discoveryTime time.Duration, - requestSnapshots func(), + requestSnapshots func() error, ) (sm.State, *types.Commit, error) { if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime { discoveryTime = minimumDiscoveryTime } if discoveryTime > 0 { - requestSnapshots() + if err := requestSnapshots(); err != nil { + return sm.State{}, nil, err + } s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) time.Sleep(discoveryTime) } @@ -366,7 +349,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu } // Verify app and update app version - appVersion, err := s.verifyApp(snapshot) + appVersion, err := s.verifyApp(ctx, snapshot) if err != nil { return sm.State{}, nil, err } @@ -384,7 +367,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) - resp, err := s.conn.OfferSnapshotSync(ctx, abci.RequestOfferSnapshot{ + resp, err := s.conn.OfferSnapshot(ctx, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: snapshot.Height, Format: snapshot.Format, @@ -426,7 +409,7 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time return 
fmt.Errorf("failed to fetch chunk: %w", err) } - resp, err := s.conn.ApplySnapshotChunkSync(ctx, abci.RequestApplySnapshotChunk{ + resp, err := s.conn.ApplySnapshotChunk(ctx, abci.RequestApplySnapshotChunk{ Index: chunk.Index, Chunk: chunk.Chunk, Sender: string(chunk.Sender), @@ -494,8 +477,6 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch select { case <-ctx.Done(): return - case <-s.closeCh: - return case <-time.After(2 * time.Second): continue } @@ -511,7 +492,9 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch ticker := time.NewTicker(s.retryTimeout) defer ticker.Stop() - s.requestChunk(snapshot, index) + if err := s.requestChunk(ctx, snapshot, index); err != nil { + return + } select { case <-chunks.WaitFor(index): @@ -522,8 +505,6 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch case <-ctx.Done(): return - case <-s.closeCh: - return } ticker.Stop() @@ -531,12 +512,16 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch } // requestChunk requests a chunk from a peer. -func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { +// +// returns nil if there are no peers for the given snapshot or the +// request is successfully made and an error if the request cannot be +// completed +func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) error { peer := s.snapshots.GetPeer(snapshot) if peer == "" { s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) - return + return nil } s.logger.Debug( @@ -556,16 +541,16 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { }, } - select { - case s.chunkCh <- msg: - case <-s.closeCh: + if err := s.chunkCh.Send(ctx, msg); err != nil { + return err } + return nil } // verifyApp verifies the sync, checking the app hash and last block height. 
It returns the // app version, which should be returned as part of the initial state. -func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { - resp, err := s.connQuery.InfoSync(context.Background(), proxy.RequestInfo) +func (s *syncer) verifyApp(ctx context.Context, snapshot *snapshot) (uint64, error) { + resp, err := s.connQuery.Info(ctx, proxy.RequestInfo) if err != nil { return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err) } diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index ad902a54c1..46287ada1f 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -12,7 +12,6 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/proxy" proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" sm "github.com/tendermint/tendermint/internal/state" @@ -22,9 +21,10 @@ import ( "github.com/tendermint/tendermint/version" ) -var ctx = context.Background() - func TestSyncer_SyncAny(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + state := sm.State{ ChainID: "chain", Version: sm.Version{ @@ -68,7 +68,7 @@ func TestSyncer_SyncAny(t *testing.T) { peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") peerCID := types.NodeID("cc") - rts := setup(t, connSnapshot, connQuery, stateProvider, 3) + rts := setup(ctx, t, connSnapshot, connQuery, stateProvider, 4) rts.reactor.syncer = rts.syncer @@ -77,13 +77,13 @@ func TestSyncer_SyncAny(t *testing.T) { require.Error(t, err) // Adding a couple of peers should trigger snapshot discovery messages - err = rts.syncer.AddPeer(peerAID) + err = rts.syncer.AddPeer(ctx, peerAID) require.NoError(t, err) e := <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerAID, e.To) - err = 
rts.syncer.AddPeer(peerBID) + err = rts.syncer.AddPeer(ctx, peerBID) require.NoError(t, err) e = <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) @@ -110,7 +110,7 @@ func TestSyncer_SyncAny(t *testing.T) { // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -119,7 +119,7 @@ func TestSyncer_SyncAny(t *testing.T) { }, AppHash: []byte("app_hash_2"), }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -131,36 +131,47 @@ func TestSyncer_SyncAny(t *testing.T) { }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) chunkRequests := make(map[uint32]int) - chunkRequestsMtx := tmsync.Mutex{} + chunkRequestsMtx := sync.Mutex{} - var wg sync.WaitGroup - wg.Add(4) + chunkProcessDone := make(chan struct{}) go func() { - for e := range rts.chunkOutCh { - msg, ok := e.Message.(*ssproto.ChunkRequest) - assert.True(t, ok) - - assert.EqualValues(t, 1, msg.Height) - assert.EqualValues(t, 1, msg.Format) - assert.LessOrEqual(t, msg.Index, uint32(len(chunks))) - - added, err := rts.syncer.AddChunk(chunks[msg.Index]) - assert.NoError(t, err) - assert.True(t, added) - - chunkRequestsMtx.Lock() - chunkRequests[msg.Index]++ - chunkRequestsMtx.Unlock() + defer close(chunkProcessDone) + var seen int + for { + if seen >= 4 { + return + } - wg.Done() + select { + case <-ctx.Done(): + t.Logf("sent %d chunks", seen) + return + case e := <-rts.chunkOutCh: + msg, ok := 
e.Message.(*ssproto.ChunkRequest) + assert.True(t, ok) + + assert.EqualValues(t, 1, msg.Height) + assert.EqualValues(t, 1, msg.Format) + assert.LessOrEqual(t, msg.Index, uint32(len(chunks))) + + added, err := rts.syncer.AddChunk(chunks[msg.Index]) + assert.NoError(t, err) + assert.True(t, added) + + chunkRequestsMtx.Lock() + chunkRequests[msg.Index]++ + chunkRequestsMtx.Unlock() + seen++ + t.Logf("added chunk (%d of 4): %d", seen, msg.Index) + } } }() // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( &abci.ResponseApplySnapshotChunk{ @@ -168,25 +179,25 @@ func TestSyncer_SyncAny(t *testing.T) { RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{1, 1, 0}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1, 1, 1}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, 
}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: 9, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), }, nil) - newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() {}) + newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.NoError(t, err) - wg.Wait() + <-chunkProcessDone chunkRequestsMtx.Lock() require.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) @@ -217,9 +228,12 @@ func TestSyncer_SyncAny_noSnapshots(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, _, err := rts.syncer.SyncAny(ctx, 0, func() {}) + rts := setup(ctx, t, nil, nil, stateProvider, 2) + + _, _, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) } @@ -227,7 +241,10 @@ func TestSyncer_SyncAny_abort(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") @@ -235,11 +252,11 @@ func TestSyncer_SyncAny_abort(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), 
}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errAbort, err) rts.conn.AssertExpectations(t) } @@ -248,7 +265,10 @@ func TestSyncer_SyncAny_reject(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -266,19 +286,19 @@ func TestSyncer_SyncAny_reject(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s12), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) rts.conn.AssertExpectations(t) } @@ -287,7 +307,10 @@ 
func TestSyncer_SyncAny_reject_format(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) // s22 is tried first, which reject s22 and s12, then s11 will abort. s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -305,15 +328,15 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errAbort, err) rts.conn.AssertExpectations(t) } @@ -322,7 +345,10 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -351,15 +377,15 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerCID, sbc) require.NoError(t, err) - 
rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(sa), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) rts.conn.AssertExpectations(t) } @@ -368,7 +394,10 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, stateProvider, 2) errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -378,11 +407,11 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.True(t, errors.Is(err, errBoom)) rts.conn.AssertExpectations(t) } @@ -405,16 +434,23 @@ func TestSyncer_offerSnapshot(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) 
+ defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) @@ -451,13 +487,20 @@ func TestSyncer_applyChunks_Results(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") @@ -468,11 +511,11 @@ func TestSyncer_applyChunks_Results(t *testing.T) { _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + 
rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) @@ -505,13 +548,19 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") require.NoError(t, err) @@ -529,13 +578,13 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for 1 to be refetched - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: 
tc.result, @@ -570,13 +619,19 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, nil, stateProvider, 2) // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. @@ -623,13 +678,13 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for b sender to be rejected - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, Sender: "aa", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, Sender: "bb", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -638,7 +693,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // On retry, the last chunk will be tried 
again, so we just accept it then. if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } @@ -693,13 +748,19 @@ func TestSyncer_verifyApp(t *testing.T) { }, nil, errVerifyFailed}, "error": {nil, boom, boom}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + rts := setup(ctx, t, nil, nil, nil, 2) - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(tc.response, tc.err) - version, err := rts.syncer.verifyApp(s) + rts.connQuery.On("Info", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.err) + version, err := rts.syncer.verifyApp(ctx, s) unwrapped := errors.Unwrap(err) if unwrapped != nil { err = unwrapped diff --git a/internal/store/store.go b/internal/store/store.go index c978241ff2..eb03e5fe61 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -149,7 +149,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. - panic(fmt.Sprintf("Error reading block: %v", err)) + panic(fmt.Errorf("error reading block: %w", err)) } block, err := types.BlockFromProto(pbb) @@ -181,6 +181,26 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.LoadBlock(height) } +// LoadBlockMetaByHash returns the blockmeta who's header corresponds to the given +// hash. If none is found, returns nil. 
+func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { + bz, err := bs.db.Get(blockHashKey(hash)) + if err != nil { + panic(err) + } + if len(bz) == 0 { + return nil + } + + s := string(bz) + height, err := strconv.ParseInt(s, 10, 64) + + if err != nil { + panic(fmt.Sprintf("failed to extract height from %s: %v", s, err)) + } + return bs.LoadBlockMeta(height) +} + // LoadBlockPart returns the Part at the given index // from the block at the given height. // If no part is found for the given height and index, it returns nil. @@ -201,7 +221,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { } part, err := types.PartFromProto(pbpart) if err != nil { - panic(fmt.Sprintf("Error reading block part: %v", err)) + panic(fmt.Errorf("error reading block part: %w", err)) } return part @@ -253,7 +273,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { } commit, err := types.CommitFromProto(pbc) if err != nil { - panic(fmt.Sprintf("Error reading block commit: %v", err)) + panic(fmt.Errorf("error reading block commit: %w", err)) } return commit } @@ -273,7 +293,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit { } err = proto.Unmarshal(bz, pbc) if err != nil { - panic(fmt.Sprintf("error reading block seen commit: %v", err)) + panic(fmt.Errorf("error reading block seen commit: %w", err)) } commit, err := types.CommitFromProto(pbc) @@ -552,6 +572,10 @@ func (bs *BlockStore) SaveSignedHeader(sh *types.SignedHeader, blockID types.Blo return batch.Close() } +func (bs *BlockStore) Close() error { + return bs.db.Close() +} + //---------------------------------- KEY ENCODING ----------------------------------------- // key prefixes diff --git a/internal/store/store_test.go b/internal/store/store_test.go index d44c3da606..347b0d4aec 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -2,6 +2,7 @@ package store import ( "fmt" + stdlog "log" "os" "runtime/debug" "strings" @@ -39,14 
+40,18 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { types.BlockID{Hash: []byte(""), PartSetHeader: types.PartSetHeader{Hash: []byte(""), Total: 2}}, commitSigs) } -func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { - cfg := config.ResetTestRoot("blockchain_reactor_test") +func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc, error) { + cfg, err := config.ResetTestRoot("blockchain_reactor_test") + if err != nil { + return sm.State{}, nil, nil, err + } + blockDB := dbm.NewMemDB() state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { - panic(fmt.Errorf("error constructing state from genesis file: %w", err)) + return sm.State{}, nil, nil, fmt.Errorf("error constructing state from genesis file: %w", err) } - return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) } + return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) }, nil } func freshBlockStore() (*BlockStore, dbm.DB) { @@ -65,9 +70,22 @@ var ( func TestMain(m *testing.M) { var cleanup cleanupFunc - state, _, cleanup = makeStateAndBlockStore(log.NewNopLogger()) - block = factory.MakeBlock(state, 1, new(types.Commit)) - partSet = block.MakePartSet(2) + var err error + + state, _, cleanup, err = makeStateAndBlockStore(log.NewNopLogger()) + if err != nil { + stdlog.Fatal(err) + } + + block, err = factory.MakeBlock(state, 1, new(types.Commit)) + + if err != nil { + stdlog.Fatal(err) + } + partSet, err = block.MakePartSet(2) + if err != nil { + stdlog.Fatal(err) + } part1 = partSet.GetPart(0) part2 = partSet.GetPart(1) seenCommit1 = makeTestCommit(10, tmtime.Now()) @@ -78,8 +96,9 @@ func TestMain(m *testing.M) { // TODO: This test should be simplified ... 
func TestBlockStoreSaveLoadBlock(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger()) + state, bs, cleanup, err := makeStateAndBlockStore(log.NewNopLogger()) defer cleanup() + require.NoError(t, err) require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") @@ -92,8 +111,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // save a block - block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) - validPartSet := block.MakePartSet(2) + block, err := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + require.NoError(t, err) + validPartSet, err := block.MakePartSet(2) + require.NoError(t, err) seenCommit := makeTestCommit(10, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") @@ -101,7 +122,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) - _, err := uncontiguousPartSet.AddPart(part2) + _, err = uncontiguousPartSet.AddPart(part2) require.Error(t, err) header1 := types.Header{ @@ -268,7 +289,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } assert.Nil(t, panicErr, "#%d: unexpected panic", i) - assert.Nil(t, err, "#%d: expecting a non-nil error", i) + assert.NoError(t, err, "#%d: expecting a non-nil error", i) qua, ok := res.(*quad) if !ok || qua == nil { t.Errorf("#%d: got nil quad back; gotType=%T", i, res) @@ -286,15 +307,19 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } func TestLoadBaseMeta(t *testing.T) { - cfg := config.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot("blockchain_reactor_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) 
require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) for h := int64(1); h <= 10; h++ { - block := factory.MakeBlock(state, h, new(types.Commit)) - partSet := block.MakePartSet(2) + block, err := factory.MakeBlock(state, h, new(types.Commit)) + require.NoError(t, err) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) seenCommit := makeTestCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) } @@ -342,7 +367,9 @@ func TestLoadBlockPart(t *testing.T) { } func TestPruneBlocks(t *testing.T) { - cfg := config.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot("blockchain_reactor_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) @@ -357,8 +384,10 @@ func TestPruneBlocks(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { - block := factory.MakeBlock(state, h, new(types.Commit)) - partSet := block.MakePartSet(2) + block, err := factory.MakeBlock(state, h, new(types.Commit)) + require.NoError(t, err) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) seenCommit := makeTestCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) } @@ -459,12 +488,15 @@ func TestLoadBlockMeta(t *testing.T) { } func TestBlockFetchAtHeight(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger()) + state, bs, cleanup, err := makeStateAndBlockStore(log.NewNopLogger()) defer cleanup() + require.NoError(t, err) require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") - block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + block, err := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + require.NoError(t, err) - partSet := block.MakePartSet(2) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) seenCommit := makeTestCommit(10, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) 
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -503,8 +535,10 @@ func TestSeenAndCanonicalCommit(t *testing.T) { // are persisted. for h := int64(3); h <= 5; h++ { blockCommit := makeTestCommit(h-1, tmtime.Now()) - block := factory.MakeBlock(state, h, blockCommit) - partSet := block.MakePartSet(2) + block, err := factory.MakeBlock(state, h, blockCommit) + require.NoError(t, err) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) seenCommit := makeTestCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) c3 := bs.LoadSeenCommit() diff --git a/internal/test/factory/block.go b/internal/test/factory/block.go index f8772f1894..3fd34cdc5e 100644 --- a/internal/test/factory/block.go +++ b/internal/test/factory/block.go @@ -1,8 +1,10 @@ package factory import ( + "testing" "time" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/types" @@ -17,13 +19,6 @@ var ( DefaultTestTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) ) -func MakeVersion() version.Consensus { - return version.Consensus{ - Block: version.BlockProtocol, - App: 1, - } -} - func RandomAddress() []byte { return crypto.CRandBytes(crypto.AddressSize) } @@ -48,14 +43,15 @@ func MakeBlockIDWithHash(hash []byte) types.BlockID { // MakeHeader fills the rest of the contents of the header such that it passes // validate basic -func MakeHeader(h *types.Header) (*types.Header, error) { +func MakeHeader(t *testing.T, h *types.Header) *types.Header { + t.Helper() if h.Version.Block == 0 { h.Version.Block = version.BlockProtocol } if h.Height == 0 { h.Height = 1 } - if h.LastBlockID.IsZero() { + if h.LastBlockID.IsNil() { h.LastBlockID = MakeBlockID() } if h.ChainID == "" { @@ -89,13 +85,7 @@ func MakeHeader(h *types.Header) (*types.Header, error) { h.ProposerAddress = RandomAddress() } - return h, h.ValidateBasic() -} + 
require.NoError(t, h.ValidateBasic()) -func MakeRandomHeader() *types.Header { - h, err := MakeHeader(&types.Header{}) - if err != nil { - panic(err) - } return h } diff --git a/internal/test/factory/commit.go b/internal/test/factory/commit.go index d3a392a20e..1a8691855e 100644 --- a/internal/test/factory/commit.go +++ b/internal/test/factory/commit.go @@ -2,21 +2,18 @@ package factory import ( "context" - "fmt" "time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) -func MakeCommit(blockID types.BlockID, height int64, round int32, - voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.Commit, error) { - +func MakeCommit(ctx context.Context, blockID types.BlockID, height int64, round int32, voteSet *types.VoteSet, validators []types.PrivValidator, now time.Time) (*types.Commit, error) { // all sign for i := 0; i < len(validators); i++ { - pubKey, err := validators[i].GetPubKey(context.Background()) + pubKey, err := validators[i].GetPubKey(ctx) if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) + return nil, err } vote := &types.Vote{ ValidatorAddress: pubKey.Address(), @@ -28,21 +25,16 @@ func MakeCommit(blockID types.BlockID, height int64, round int32, Timestamp: now, } - _, err = signAddVote(validators[i], vote, voteSet) - if err != nil { + v := vote.ToProto() + + if err := validators[i].SignVote(ctx, voteSet.ChainID(), v); err != nil { + return nil, err + } + vote.Signature = v.Signature + if _, err := voteSet.AddVote(vote); err != nil { return nil, err } } return voteSet.MakeCommit(), nil } - -func signAddVote(privVal types.PrivValidator, vote *types.Vote, voteSet *types.VoteSet) (signed bool, err error) { - v := vote.ToProto() - err = privVal.SignVote(context.Background(), voteSet.ChainID(), v) - if err != nil { - return false, err - } - vote.Signature = v.Signature - return voteSet.AddVote(vote) -} diff --git a/internal/test/factory/factory_test.go 
b/internal/test/factory/factory_test.go index 07a3ef8b33..6cdc2aed96 100644 --- a/internal/test/factory/factory_test.go +++ b/internal/test/factory/factory_test.go @@ -3,16 +3,13 @@ package factory import ( "testing" - "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" ) func TestMakeHeader(t *testing.T) { - _, err := MakeHeader(&types.Header{}) - assert.NoError(t, err) + MakeHeader(t, &types.Header{}) } func TestRandomNodeID(t *testing.T) { - assert.NotPanics(t, func() { RandomNodeID() }) + RandomNodeID(t) } diff --git a/internal/test/factory/genesis.go b/internal/test/factory/genesis.go index d3a0a8464d..c49f9fce8e 100644 --- a/internal/test/factory/genesis.go +++ b/internal/test/factory/genesis.go @@ -1,35 +1,33 @@ package factory import ( - "sort" + "time" - "github.com/tendermint/tendermint/config" - tmtime "github.com/tendermint/tendermint/libs/time" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" ) -func RandGenesisDoc( - cfg *config.Config, - numValidators int, - randPower bool, - minPower int64) (*types.GenesisDoc, []types.PrivValidator) { +func GenesisDoc( + config *cfg.Config, + time time.Time, + validators []*types.Validator, + consensusParams *types.ConsensusParams, +) *types.GenesisDoc { - validators := make([]types.GenesisValidator, numValidators) - privValidators := make([]types.PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privVal := RandValidator(randPower, minPower) - validators[i] = types.GenesisValidator{ - PubKey: val.PubKey, - Power: val.VotingPower, + genesisValidators := make([]types.GenesisValidator, len(validators)) + + for i := range validators { + genesisValidators[i] = types.GenesisValidator{ + Power: validators[i].VotingPower, + PubKey: validators[i].PubKey, } - privValidators[i] = privVal } - sort.Sort(types.PrivValidatorsByAddress(privValidators)) return &types.GenesisDoc{ - GenesisTime: tmtime.Now(), - InitialHeight: 1, - ChainID: 
cfg.ChainID(), - Validators: validators, - }, privValidators + GenesisTime: time, + InitialHeight: 1, + ChainID: config.ChainID(), + Validators: genesisValidators, + ConsensusParams: consensusParams, + } } diff --git a/internal/test/factory/p2p.go b/internal/test/factory/p2p.go index 34c139f588..40d8eda9d7 100644 --- a/internal/test/factory/p2p.go +++ b/internal/test/factory/p2p.go @@ -3,25 +3,29 @@ package factory import ( "encoding/hex" "strings" + "testing" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) // NodeID returns a valid NodeID based on an inputted string -func NodeID(str string) types.NodeID { +func NodeID(t *testing.T, str string) types.NodeID { + t.Helper() + id, err := types.NewNodeID(strings.Repeat(str, 2*types.NodeIDByteLength)) - if err != nil { - panic(err) - } + require.NoError(t, err) + return id } // RandomNodeID returns a randomly generated valid NodeID -func RandomNodeID() types.NodeID { +func RandomNodeID(t *testing.T) types.NodeID { + t.Helper() + id, err := types.NewNodeID(hex.EncodeToString(rand.Bytes(types.NodeIDByteLength))) - if err != nil { - panic(err) - } + require.NoError(t, err) + return id } diff --git a/internal/test/factory/tx.go b/internal/test/factory/tx.go index c97aeefc96..0353083490 100644 --- a/internal/test/factory/tx.go +++ b/internal/test/factory/tx.go @@ -2,15 +2,10 @@ package factory import "github.com/tendermint/tendermint/types" -// MakeTxs is a helper function to generate mock transactions by given the block height -// and the transaction numbers. 
-func MakeTxs(height int64, num int) (txs []types.Tx) { - for i := 0; i < num; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) +func MakeTenTxs(height int64) []types.Tx { + txs := make([]types.Tx, 10) + for i := range txs { + txs[i] = types.Tx([]byte{byte(height), byte(i)}) } return txs } - -func MakeTenTxs(height int64) (txs []types.Tx) { - return MakeTxs(height, 10) -} diff --git a/internal/test/factory/validator.go b/internal/test/factory/validator.go index 428d5d86e9..383ba7536e 100644 --- a/internal/test/factory/validator.go +++ b/internal/test/factory/validator.go @@ -2,36 +2,34 @@ package factory import ( "context" - "fmt" - "math/rand" "sort" + "testing" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/types" ) -func RandValidator(randPower bool, minPower int64) (*types.Validator, types.PrivValidator) { +func Validator(ctx context.Context, votingPower int64) (*types.Validator, types.PrivValidator, error) { privVal := types.NewMockPV() - votePower := minPower - if randPower { - // nolint:gosec // G404: Use of weak random number generator - votePower += int64(rand.Uint32()) - } - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) if err != nil { - panic(fmt.Errorf("could not retrieve pubkey %w", err)) + return nil, nil, err } - val := types.NewValidator(pubKey, votePower) - return val, privVal + + val := types.NewValidator(pubKey, votingPower) + return val, privVal, nil } -func RandValidatorSet(numValidators int, votingPower int64) (*types.ValidatorSet, []types.PrivValidator) { +func ValidatorSet(ctx context.Context, t *testing.T, numValidators int, votingPower int64) (*types.ValidatorSet, []types.PrivValidator) { var ( valz = make([]*types.Validator, numValidators) privValidators = make([]types.PrivValidator, numValidators) ) + t.Helper() for i := 0; i < numValidators; i++ { - val, privValidator := RandValidator(false, votingPower) + val, privValidator, err := 
Validator(ctx, votingPower) + require.NoError(t, err) valz[i] = val privValidators[i] = privValidator } diff --git a/internal/test/factory/vote.go b/internal/test/factory/vote.go index 8d07b008c3..fc63e8d681 100644 --- a/internal/test/factory/vote.go +++ b/internal/test/factory/vote.go @@ -9,6 +9,7 @@ import ( ) func MakeVote( + ctx context.Context, val types.PrivValidator, chainID string, valIndex int32, @@ -18,10 +19,11 @@ func MakeVote( blockID types.BlockID, time time.Time, ) (*types.Vote, error) { - pubKey, err := val.GetPubKey(context.Background()) + pubKey, err := val.GetPubKey(ctx) if err != nil { return nil, err } + v := &types.Vote{ ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, @@ -33,10 +35,10 @@ func MakeVote( } vpb := v.ToProto() - err = val.SignVote(context.Background(), chainID, vpb) - if err != nil { - panic(err) + if err := val.SignVote(ctx, chainID, vpb); err != nil { + return nil, err } + v.Signature = vpb.Signature return v, nil } diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index a0258521c4..d0f79ce142 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -5,13 +5,12 @@ import ( "errors" "fmt" "math" - mrand "math/rand" + "math/rand" "regexp" "strings" "sync" tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" tmprotobits "github.com/tendermint/tendermint/proto/tendermint/libs/bits" ) @@ -25,8 +24,6 @@ type BitArray struct { // NewBitArray returns a new bit array. // It returns nil if the number of bits is zero. func NewBitArray(bits int) *BitArray { - // Reseed non-deterministically. - tmrand.Reseed() if bits <= 0 { return nil } @@ -270,8 +267,14 @@ func (bA *BitArray) PickRandom() (int, bool) { if len(trueIndices) == 0 { // no bits set to true return 0, false } + + // NOTE: using the default math/rand might result in somewhat + // amount of determinism here. 
It would be possible to use + // rand.New(rand.NewSeed(time.Now().Unix())).Intn() to + // counteract this possibility if it proved to be material. + // // nolint:gosec // G404: Use of weak random number generator - return trueIndices[mrand.Intn(len(trueIndices))], true + return trueIndices[rand.Intn(len(trueIndices))], true } func (bA *BitArray) getTrueIndices() []int { diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index 96f2e2257f..a12cc80a2b 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -3,7 +3,6 @@ package bits import ( "bytes" "encoding/json" - "fmt" "math" "testing" @@ -99,11 +98,11 @@ func TestSub(t *testing.T) { for _, tc := range testCases { var bA *BitArray err := json.Unmarshal([]byte(tc.initBA), &bA) - require.Nil(t, err) + require.NoError(t, err) var o *BitArray err = json.Unmarshal([]byte(tc.subtractingBA), &o) - require.Nil(t, err) + require.NoError(t, err) got, _ := json.Marshal(bA.Sub(o)) require.Equal( @@ -149,9 +148,8 @@ func TestBytes(t *testing.T) { bA := NewBitArray(4) bA.SetIndex(0, true) check := func(bA *BitArray, bz []byte) { - if !bytes.Equal(bA.Bytes(), bz) { - panic(fmt.Sprintf("Expected %X but got %X", bz, bA.Bytes())) - } + require.True(t, bytes.Equal(bA.Bytes(), bz), + "Expected %X but got %X", bz, bA.Bytes()) } check(bA, []byte{0x01}) bA.SetIndex(3, true) diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go index 6a9ca7c3d7..ce39935e81 100644 --- a/libs/bytes/bytes_test.go +++ b/libs/bytes/bytes_test.go @@ -14,12 +14,12 @@ func TestMarshal(t *testing.T) { bz := []byte("hello world") dataB := HexBytes(bz) bz2, err := dataB.Marshal() - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, bz, bz2) var dataB2 HexBytes err = (&dataB2).Unmarshal(bz) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, dataB, dataB2) } diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 4b87bd60be..76f3c9043a 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go 
@@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -19,7 +18,7 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } // RunWithArgs executes the given command with the specified command line args @@ -70,7 +69,10 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s var buf bytes.Buffer // io.Copy will end when we call reader.Close() below io.Copy(&buf, reader) //nolint:errcheck //ignore error - stdC <- buf.String() + select { + case <-cmd.Context().Done(): + case stdC <- buf.String(): + } }() return &stdC } diff --git a/libs/cli/setup.go b/libs/cli/setup.go index e4955dcf41..be69c30aff 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -1,6 +1,7 @@ package cli import ( + "context" "fmt" "os" "path/filepath" @@ -22,14 +23,15 @@ const ( // wrap if desired before the test type Executable interface { Execute() error + Context() context.Context } // PrepareBaseCmd is meant for tendermint and other servers func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { - cobra.OnInitialize(func() { initEnv(envPrefix) }) + cobra.OnInitialize(func() { InitEnv(envPrefix) }) cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data") cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") - cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) + cmd.PersistentPreRunE = concatCobraCmdFuncs(BindFlagsLoadViper, cmd.PersistentPreRunE) return Executor{cmd, os.Exit} } @@ -44,8 +46,8 @@ func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor return PrepareBaseCmd(cmd, envPrefix, defaultHome) } -// initEnv sets to use ENV variables if set. 
-func initEnv(prefix string) { +// InitEnv sets to use ENV variables if set. +func InitEnv(prefix string) { copyEnvVars(prefix) // env variables with TM prefix (eg. TM_ROOT) @@ -125,7 +127,7 @@ func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { } // Bind all flags and read the config into viper -func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { +func BindFlagsLoadViper(cmd *cobra.Command, args []string) error { // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { return err diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index 0cb3223446..bc62481af8 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -2,7 +2,7 @@ package cli import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" "testing" @@ -49,16 +49,15 @@ func TestSetupEnv(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) } } -func tempDir() string { - cdir, err := ioutil.TempDir("", "test-cli") - if err != nil { - panic(err) - } +func tempDir(t *testing.T) string { + t.Helper() + cdir, err := os.MkdirTemp("", "test-cli") + require.NoError(t, err) return cdir } @@ -66,9 +65,9 @@ func TestSetupConfig(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. cval1 := "fubble" - conf1 := tempDir() + conf1 := tempDir(t) err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) - require.Nil(t, err) + require.NoError(t, err) cases := []struct { args []string @@ -109,7 +108,7 @@ func TestSetupConfig(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) assert.Equal(t, tc.expectedTwo, two, i) } @@ -125,13 +124,13 @@ func TestSetupUnmarshal(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. cval1, cval2 := "someone", "else" - conf1 := tempDir() + conf1 := tempDir(t) err := WriteConfigVals(conf1, map[string]string{"name": cval1}) - require.Nil(t, err) + require.NoError(t, err) // even with some ignored fields, should be no problem - conf2 := tempDir() + conf2 := tempDir(t) err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) - require.Nil(t, err) + require.NoError(t, err) // unused is not declared on a flag and remains from base base := DemoConfig{ @@ -188,7 +187,7 @@ func TestSetupUnmarshal(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + require.NoError(t, err, i) assert.Equal(t, tc.expected, cfg, i) } } @@ -221,7 +220,7 @@ func TestSetupTrace(t *testing.T) { viper.Reset() args := append([]string{cmd.Use}, tc.args...) stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) - require.NotNil(t, err, i) + require.Error(t, err, i) require.Equal(t, "", stdout, i) require.NotEqual(t, "", stderr, i) msg := strings.Split(stderr, "\n") diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go deleted file mode 100644 index 5aa82e807c..0000000000 --- a/libs/cmap/cmap.go +++ /dev/null @@ -1,91 +0,0 @@ -package cmap - -import ( - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// CMap is a goroutine-safe map -type CMap struct { - m map[string]interface{} - l tmsync.Mutex -} - -func NewCMap() *CMap { - return &CMap{ - m: make(map[string]interface{}), - } -} - -func (cm *CMap) Set(key string, value interface{}) { - cm.l.Lock() - cm.m[key] = value - cm.l.Unlock() -} - -// GetOrSet returns the existing value if present. 
Othewise, it stores `newValue` and returns it. -func (cm *CMap) GetOrSet(key string, newValue interface{}) (value interface{}, alreadyExists bool) { - - cm.l.Lock() - defer cm.l.Unlock() - - if v, ok := cm.m[key]; ok { - return v, true - } - - cm.m[key] = newValue - return newValue, false -} - -func (cm *CMap) Get(key string) interface{} { - cm.l.Lock() - val := cm.m[key] - cm.l.Unlock() - return val -} - -func (cm *CMap) Has(key string) bool { - cm.l.Lock() - _, ok := cm.m[key] - cm.l.Unlock() - return ok -} - -func (cm *CMap) Delete(key string) { - cm.l.Lock() - delete(cm.m, key) - cm.l.Unlock() -} - -func (cm *CMap) Size() int { - cm.l.Lock() - size := len(cm.m) - cm.l.Unlock() - return size -} - -func (cm *CMap) Clear() { - cm.l.Lock() - cm.m = make(map[string]interface{}) - cm.l.Unlock() -} - -func (cm *CMap) Keys() []string { - cm.l.Lock() - - keys := make([]string, 0, len(cm.m)) - for k := range cm.m { - keys = append(keys, k) - } - cm.l.Unlock() - return keys -} - -func (cm *CMap) Values() []interface{} { - cm.l.Lock() - items := make([]interface{}, 0, len(cm.m)) - for _, v := range cm.m { - items = append(items, v) - } - cm.l.Unlock() - return items -} diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go deleted file mode 100644 index 68a052bdb7..0000000000 --- a/libs/cmap/cmap_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package cmap - -import ( - "fmt" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIterateKeysWithValues(t *testing.T) { - cmap := NewCMap() - - for i := 1; i <= 10; i++ { - cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) - } - - // Testing size - assert.Equal(t, 10, cmap.Size()) - assert.Equal(t, 10, len(cmap.Keys())) - assert.Equal(t, 10, len(cmap.Values())) - - // Iterating Keys, checking for matching Value - for _, key := range cmap.Keys() { - val := strings.ReplaceAll(key, "key", "value") - assert.Equal(t, val, cmap.Get(key)) - } - - // Test if all keys are within []Keys() - 
keys := cmap.Keys() - for i := 1; i <= 10; i++ { - assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") - } - - // Delete 1 Key - cmap.Delete("key1") - - assert.NotEqual( - t, - len(keys), - len(cmap.Keys()), - "[]keys and []Keys() should not be equal, they are copies, one item was removed", - ) -} - -func TestContains(t *testing.T) { - cmap := NewCMap() - - cmap.Set("key1", "value1") - - // Test for known values - assert.True(t, cmap.Has("key1")) - assert.Equal(t, "value1", cmap.Get("key1")) - - // Test for unknown values - assert.False(t, cmap.Has("key2")) - assert.Nil(t, cmap.Get("key2")) -} - -func BenchmarkCMapHas(b *testing.B) { - m := NewCMap() - for i := 0; i < 1000; i++ { - m.Set(string(rune(i)), i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Has(string(rune(i))) - } -} - -func TestCMap_GetOrSet_Parallel(t *testing.T) { - - tests := []struct { - name string - newValue interface{} - parallelism int - }{ - {"test1", "a", 4}, - {"test2", "a", 40}, - {"test3", "a", 1}, - } - - //nolint:scopelint - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := NewCMap() - - wg := sync.WaitGroup{} - wg.Add(tt.parallelism) - for i := 0; i < tt.parallelism; i++ { - go func() { - defer wg.Done() - gotValue, _ := cm.GetOrSet(tt.name, tt.newValue) - assert.EqualValues(t, tt.newValue, gotValue) - }() - } - wg.Wait() - }) - } -} - -func TestCMap_GetOrSet_Exists(t *testing.T) { - cm := NewCMap() - - gotValue, exists := cm.GetOrSet("key", 1000) - assert.False(t, exists) - assert.EqualValues(t, 1000, gotValue) - - gotValue, exists = cm.GetOrSet("key", 2000) - assert.True(t, exists) - assert.EqualValues(t, 1000, gotValue) -} diff --git a/libs/events/event_cache.go b/libs/events/event_cache.go index f508e873da..41633cbefa 100644 --- a/libs/events/event_cache.go +++ b/libs/events/event_cache.go @@ -1,5 +1,7 @@ package events +import "context" + // An EventCache buffers events for a Fireable // All events are cached. 
Filtering happens on Flush type EventCache struct { @@ -28,9 +30,9 @@ func (evc *EventCache) FireEvent(event string, data EventData) { // Fire events by running evsw.FireEvent on all cached events. Blocks. // Clears cached events -func (evc *EventCache) Flush() { +func (evc *EventCache) Flush(ctx context.Context) { for _, ei := range evc.events { - evc.evsw.FireEvent(ei.event, ei.data) + evc.evsw.FireEvent(ctx, ei.event, ei.data) } // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation evc.events = nil diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go index d6199bc800..fb36fa6748 100644 --- a/libs/events/event_cache_test.go +++ b/libs/events/event_cache_test.go @@ -1,34 +1,42 @@ package events import ( + "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) func TestEventCache_Flush(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + evsw := NewEventSwitch(logger) + err := evsw.Start(ctx) require.NoError(t, err) - err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { + err = evsw.AddListenerForEvent("nothingness", "", func(_ context.Context, data EventData) error { // Check we are not initializing an empty buffer full of zeroed eventInfos in the EventCache require.FailNow(t, "We should never receive a message on this switch since none are fired") + return nil }) require.NoError(t, err) evc := NewEventCache(evsw) - evc.Flush() + evc.Flush(ctx) // Check after reset - evc.Flush() + evc.Flush(ctx) fail := true pass := false - err = evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { + err = evsw.AddListenerForEvent("somethingness", "something", func(_ context.Context, data EventData) error { if fail { require.FailNow(t, 
"Shouldn't see a message until flushed") } pass = true + return nil }) require.NoError(t, err) @@ -36,6 +44,6 @@ func TestEventCache_Flush(t *testing.T) { evc.FireEvent("something", struct{ int }{2}) evc.FireEvent("something", struct{ int }{3}) fail = false - evc.Flush() + evc.Flush(ctx) assert.True(t, pass) } diff --git a/libs/events/events.go b/libs/events/events.go index 146a9cfa7c..f97dfb1a1b 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -2,9 +2,11 @@ package events import ( + "context" "fmt" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -32,7 +34,7 @@ type Eventable interface { // // FireEvent fires an event with the given name and data. type Fireable interface { - FireEvent(eventValue string, data EventData) + FireEvent(ctx context.Context, eventValue string, data EventData) } // EventSwitch is the interface for synchronous pubsub, where listeners @@ -45,6 +47,7 @@ type Fireable interface { type EventSwitch interface { service.Service Fireable + Stop() error AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error RemoveListenerForEvent(event string, listenerID string) @@ -54,21 +57,21 @@ type EventSwitch interface { type eventSwitch struct { service.BaseService - mtx tmsync.RWMutex + mtx sync.RWMutex eventCells map[string]*eventCell listeners map[string]*eventListener } -func NewEventSwitch() EventSwitch { +func NewEventSwitch(logger log.Logger) EventSwitch { evsw := &eventSwitch{ eventCells: make(map[string]*eventCell), listeners: make(map[string]*eventListener), } - evsw.BaseService = *service.NewBaseService(nil, "EventSwitch", evsw) + evsw.BaseService = *service.NewBaseService(logger, "EventSwitch", evsw) return evsw } -func (evsw *eventSwitch) OnStart() error { +func (evsw *eventSwitch) OnStart(ctx context.Context) error { return nil } @@ -146,7 +149,7 @@ func (evsw *eventSwitch) 
RemoveListenerForEvent(event string, listenerID string) } } -func (evsw *eventSwitch) FireEvent(event string, data EventData) { +func (evsw *eventSwitch) FireEvent(ctx context.Context, event string, data EventData) { // Get the eventCell evsw.mtx.RLock() eventCell := evsw.eventCells[event] @@ -157,14 +160,14 @@ func (evsw *eventSwitch) FireEvent(event string, data EventData) { } // Fire event for all listeners in eventCell - eventCell.FireEvent(data) + eventCell.FireEvent(ctx, data) } //----------------------------------------------------------------------------- // eventCell handles keeping track of listener callbacks for a given event. type eventCell struct { - mtx tmsync.RWMutex + mtx sync.RWMutex listeners map[string]EventCallback } @@ -188,7 +191,7 @@ func (cell *eventCell) RemoveListener(listenerID string) int { return numListeners } -func (cell *eventCell) FireEvent(data EventData) { +func (cell *eventCell) FireEvent(ctx context.Context, data EventData) { cell.mtx.RLock() eventCallbacks := make([]EventCallback, 0, len(cell.listeners)) for _, cb := range cell.listeners { @@ -197,18 +200,21 @@ func (cell *eventCell) FireEvent(data EventData) { cell.mtx.RUnlock() for _, cb := range eventCallbacks { - cb(data) + if err := cb(ctx, data); err != nil { + // should we log or abort here? 
+ continue + } } } //----------------------------------------------------------------------------- -type EventCallback func(data EventData) +type EventCallback func(ctx context.Context, data EventData) error type eventListener struct { id string - mtx tmsync.RWMutex + mtx sync.RWMutex removed bool events []string } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 9e21e02351..6d0c8b4e7f 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -1,37 +1,43 @@ package events import ( + "context" "fmt" + "math/rand" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/log" ) // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single // listener to an event, and sends a string "data". func TestAddListenerForEventFireOnce(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) messages := make(chan EventData) - err = evsw.AddListenerForEvent("listener", "event", - func(data EventData) { + require.NoError(t, evsw.AddListenerForEvent("listener", "event", + func(ctx context.Context, data EventData) error { // test there's no deadlock if we remove the listener inside a callback evsw.RemoveListener("listener") - messages <- data - }) - require.NoError(t, err) - go evsw.FireEvent("event", "data") + select { + case messages <- data: + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + go evsw.FireEvent(ctx, "event", "data") received := <-messages if received != "data" { t.Errorf("message received does not match: %v", received) @@ -41,28 +47,32 @@ func 
TestAddListenerForEventFireOnce(t *testing.T) { // TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single // listener to an event, and sends a thousand integers. func TestAddListenerForEventFireMany(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener for one event - err = evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener", "event", + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events - go fireEvents(evsw, "event", doneSending, uint64(1)) + go fireEvents(ctx, evsw, "event", doneSending, uint64(1)) checkSum := <-doneSending close(numbers) eventSum := <-doneSum @@ -75,14 +85,14 @@ func TestAddListenerForEventFireMany(t *testing.T) { // listener to three different events and sends a thousand integers for each // of the three events. 
func TestAddListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum := make(chan uint64) doneSending1 := make(chan uint64) @@ -90,27 +100,39 @@ func TestAddListenerForDifferentEvents(t *testing.T) { doneSending3 := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener to three events - err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event3", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event3", + func(ctx context.Context, data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1)) - go fireEvents(evsw, "event3", 
doneSending3, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(1)) var checkSum uint64 checkSum += <-doneSending1 checkSum += <-doneSending2 @@ -127,15 +149,14 @@ func TestAddListenerForDifferentEvents(t *testing.T) { // listener to two of those three events, and then sends a thousand integers // for each of the three events. func TestAddDifferentListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + logger := log.NewTestingLogger(t) + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -145,39 +166,59 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", + func(ctx context.Context, data EventData) error { + select { + case 
numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events for listener1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for listener2 go sumReceivedNumbers(numbers2, doneSum2) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001)) checkSumEvent1 := <-doneSending1 checkSumEvent2 := <-doneSending2 checkSumEvent3 := <-doneSending3 @@ -199,14 +240,14 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { roundCount = 2000 ) - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + 
logger := log.NewTestingLogger(t) + + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) done1 := make(chan struct{}) done2 := make(chan struct{}) @@ -228,9 +269,10 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // we explicitly ignore errors here, since the listener will sometimes be removed // (that's what we're testing) _ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), - func(data EventData) { + func(ctx context.Context, data EventData) error { t.Errorf("should not run callback for %d.\n", index) stopInputEvent = true + return nil }) } }() @@ -241,7 +283,7 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { evsw.RemoveListener("listener") // remove the last listener for i := 0; i < roundCount && !stopInputEvent; i++ { - evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) + evsw.FireEvent(ctx, fmt.Sprintf("event%d", i), uint64(1001)) } } @@ -249,14 +291,13 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // two events, fires a thousand integers for the first event, then unsubscribes // the listener and fires a thousand integers for the second event. 
func TestAddAndRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -265,26 +306,34 @@ func TestAddAndRemoveListener(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 go sumReceivedNumbers(numbers2, doneSum2) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) checkSumEvent1 := <-doneSending1 // after sending all event1, unsubscribe for all events evsw.RemoveListener("listener") - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) checkSumEvent2 := <-doneSending2 close(numbers1) close(numbers2) @@ -300,33 
+349,32 @@ func TestAddAndRemoveListener(t *testing.T) { // TestRemoveListener does basic tests on adding and removing func TestRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work - err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", + func(ctx context.Context, data EventData) error { sum1++ - }) - require.NoError(t, err) - - err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { + return nil + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", + func(ctx context.Context, data EventData) error { sum2++ - }) - require.NoError(t, err) + return nil + })) for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) + evsw.FireEvent(ctx, "event1", true) + evsw.FireEvent(ctx, "event2", true) } assert.Equal(t, count, sum1) assert.Equal(t, count, sum2) @@ -334,8 +382,8 @@ func TestRemoveListener(t *testing.T) { // remove one by event and make sure it is gone evsw.RemoveListenerForEvent("event2", "listener") for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) + evsw.FireEvent(ctx, "event1", true) + evsw.FireEvent(ctx, "event2", true) } assert.Equal(t, count*2, sum1) assert.Equal(t, count, sum2) @@ -343,8 +391,8 @@ func TestRemoveListener(t *testing.T) { // remove the listener entirely and make sure both gone evsw.RemoveListener("listener") for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) + 
evsw.FireEvent(ctx, "event1", true) + evsw.FireEvent(ctx, "event2", true) } assert.Equal(t, count*2, sum1) assert.Equal(t, count, sum2) @@ -361,14 +409,13 @@ func TestRemoveListener(t *testing.T) { // NOTE: it is important to run this test with race conditions tracking on, // `go test -race`, to examine for possible race conditions. func TestRemoveListenersAsync(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + evsw := NewEventSwitch(logger) + require.NoError(t, evsw.Start(ctx)) + t.Cleanup(evsw.Wait) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -378,53 +425,77 @@ func TestRemoveListenersAsync(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event1", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return 
nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", + func(ctx context.Context, data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event1", + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", + func(ctx context.Context, data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 go sumReceivedNumbers(numbers2, doneSum2) addListenersStress := func() { - r1 := rand.NewRand() + r1 := rand.New(rand.NewSource(time.Now().Unix())) r1.Seed(time.Now().UnixNano()) for k := uint16(0); k < 400; k++ { listenerNumber := r1.Intn(100) + 3 eventNumber := r1.Intn(3) + 1 go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), //nolint:errcheck // ignore for tests fmt.Sprintf("event%v", eventNumber), - func(_ EventData) {}) + func(context.Context, EventData) error { return nil }) } } removeListenersStress := func() { - r2 := rand.NewRand() + r2 := rand.New(rand.NewSource(time.Now().Unix())) r2.Seed(time.Now().UnixNano()) for k := uint16(0); k < 80; k++ 
{ listenerNumber := r2.Intn(100) + 3 @@ -433,10 +504,10 @@ func TestRemoveListenersAsync(t *testing.T) { } addListenersStress() // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) removeListenersStress() - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001)) checkSumEvent1 := <-doneSending1 checkSumEvent2 := <-doneSending2 checkSumEvent3 := <-doneSending3 @@ -475,13 +546,21 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // to `offset` + 999. It additionally returns the addition of all integers // sent on `doneChan` for assertion that all events have been sent, and enabling // the test to assert all events have also been received. -func fireEvents(evsw Fireable, event string, doneChan chan uint64, - offset uint64) { +func fireEvents(ctx context.Context, evsw Fireable, event string, doneChan chan uint64, offset uint64) { + defer close(doneChan) + var sentSum uint64 for i := offset; i <= offset+uint64(999); i++ { + if ctx.Err() != nil { + break + } + + evsw.FireEvent(ctx, event, i) sentSum += i - evsw.FireEvent(event, i) } - doneChan <- sentSum - close(doneChan) + + select { + case <-ctx.Done(): + case doneChan <- sentSum: + } } diff --git a/libs/json/decoder.go b/libs/json/decoder.go deleted file mode 100644 index 86ff27d393..0000000000 --- a/libs/json/decoder.go +++ /dev/null @@ -1,278 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" -) - -// Unmarshal unmarshals JSON into the given value, using Amino-compatible JSON encoding (strings -// for 64-bit numbers, and type wrappers for registered types). 
-func Unmarshal(bz []byte, v interface{}) error { - return decode(bz, v) -} - -func decode(bz []byte, v interface{}) error { - if len(bz) == 0 { - return errors.New("cannot decode empty bytes") - } - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return errors.New("must decode into a pointer") - } - rv = rv.Elem() - - // If this is a registered type, defer to interface decoder regardless of whether the input is - // an interface or a bare value. This retains Amino's behavior, but is inconsistent with - // behavior in structs where an interface field will get the type wrapper while a bare value - // field will not. - if typeRegistry.name(rv.Type()) != "" { - return decodeReflectInterface(bz, rv) - } - - return decodeReflect(bz, rv) -} - -func decodeReflect(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("value is not addressable") - } - - // Handle null for slices, interfaces, and pointers - if bytes.Equal(bz, []byte("null")) { - rv.Set(reflect.Zero(rv.Type())) - return nil - } - - // Dereference-and-construct pointers, to handle nested pointers. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - // Times must be UTC and end with Z - if rv.Type() == timeType { - switch { - case len(bz) < 2 || bz[0] != '"' || bz[len(bz)-1] != '"': - return fmt.Errorf("JSON time must be an RFC3339 string, but got %q", bz) - case bz[len(bz)-2] != 'Z': - return fmt.Errorf("JSON time must be UTC and end with 'Z', but got %q", bz) - } - } - - // If value implements json.Umarshaler, call it. - if rv.Addr().Type().Implements(jsonUnmarshalerType) { - return rv.Addr().Interface().(json.Unmarshaler).UnmarshalJSON(bz) - } - - switch rv.Type().Kind() { - // Decode complex types recursively. 
- case reflect.Slice, reflect.Array: - return decodeReflectList(bz, rv) - - case reflect.Map: - return decodeReflectMap(bz, rv) - - case reflect.Struct: - return decodeReflectStruct(bz, rv) - - case reflect.Interface: - return decodeReflectInterface(bz, rv) - - // For 64-bit integers, unwrap expected string and defer to stdlib for integer decoding. - case reflect.Int64, reflect.Int, reflect.Uint64, reflect.Uint: - if bz[0] != '"' || bz[len(bz)-1] != '"' { - return fmt.Errorf("invalid 64-bit integer encoding %q, expected string", string(bz)) - } - bz = bz[1 : len(bz)-1] - fallthrough - - // Anything else we defer to the stdlib. - default: - return decodeStdlib(bz, rv) - } -} - -func decodeReflectList(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("list value is not addressable") - } - - switch rv.Type().Elem().Kind() { - // Decode base64-encoded bytes using stdlib decoder, via byte slice for arrays. - case reflect.Uint8: - if rv.Type().Kind() == reflect.Array { - var buf []byte - if err := json.Unmarshal(bz, &buf); err != nil { - return err - } - if len(buf) != rv.Len() { - return fmt.Errorf("got %v bytes, expected %v", len(buf), rv.Len()) - } - reflect.Copy(rv, reflect.ValueOf(buf)) - - } else if err := decodeStdlib(bz, rv); err != nil { - return err - } - - // Decode anything else into a raw JSON slice, and decode values recursively. 
- default: - var rawSlice []json.RawMessage - if err := json.Unmarshal(bz, &rawSlice); err != nil { - return err - } - if rv.Type().Kind() == reflect.Slice { - rv.Set(reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), len(rawSlice), len(rawSlice))) - } - if rv.Len() != len(rawSlice) { // arrays of wrong size - return fmt.Errorf("got list of %v elements, expected %v", len(rawSlice), rv.Len()) - } - for i, bz := range rawSlice { - if err := decodeReflect(bz, rv.Index(i)); err != nil { - return err - } - } - } - - // Replace empty slices with nil slices, for Amino compatibility - if rv.Type().Kind() == reflect.Slice && rv.Len() == 0 { - rv.Set(reflect.Zero(rv.Type())) - } - - return nil -} - -func decodeReflectMap(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("map value is not addressable") - } - - // Decode into a raw JSON map, using string keys. - rawMap := make(map[string]json.RawMessage) - if err := json.Unmarshal(bz, &rawMap); err != nil { - return err - } - if rv.Type().Key().Kind() != reflect.String { - return fmt.Errorf("map keys must be strings, got %v", rv.Type().Key().String()) - } - - // Recursively decode values. - rv.Set(reflect.MakeMapWithSize(rv.Type(), len(rawMap))) - for key, bz := range rawMap { - value := reflect.New(rv.Type().Elem()).Elem() - if err := decodeReflect(bz, value); err != nil { - return err - } - rv.SetMapIndex(reflect.ValueOf(key), value) - } - return nil -} - -func decodeReflectStruct(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("struct value is not addressable") - } - sInfo := makeStructInfo(rv.Type()) - - // Decode raw JSON values into a string-keyed map. 
- rawMap := make(map[string]json.RawMessage) - if err := json.Unmarshal(bz, &rawMap); err != nil { - return err - } - for i, fInfo := range sInfo.fields { - if !fInfo.hidden { - frv := rv.Field(i) - bz := rawMap[fInfo.jsonName] - if len(bz) > 0 { - if err := decodeReflect(bz, frv); err != nil { - return err - } - } else if !fInfo.omitEmpty { - frv.Set(reflect.Zero(frv.Type())) - } - } - } - - return nil -} - -func decodeReflectInterface(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("interface value not addressable") - } - - // Decode the interface wrapper. - wrapper := interfaceWrapper{} - if err := json.Unmarshal(bz, &wrapper); err != nil { - return err - } - if wrapper.Type == "" { - return errors.New("interface type cannot be empty") - } - if len(wrapper.Value) == 0 { - return errors.New("interface value cannot be empty") - } - - // Dereference-and-construct pointers, to handle nested pointers. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - // Look up the interface type, and construct a concrete value. - rt, returnPtr := typeRegistry.lookup(wrapper.Type) - if rt == nil { - return fmt.Errorf("unknown type %q", wrapper.Type) - } - - cptr := reflect.New(rt) - crv := cptr.Elem() - if err := decodeReflect(wrapper.Value, crv); err != nil { - return err - } - - // This makes sure interface implementations with pointer receivers (e.g. func (c *Car)) are - // constructed as pointers behind the interface. The types must be registered as pointers with - // RegisterType(). 
- if rv.Type().Kind() == reflect.Interface && returnPtr { - if !cptr.Type().AssignableTo(rv.Type()) { - return fmt.Errorf("invalid type %q for this value", wrapper.Type) - } - rv.Set(cptr) - } else { - if !crv.Type().AssignableTo(rv.Type()) { - return fmt.Errorf("invalid type %q for this value", wrapper.Type) - } - rv.Set(crv) - } - return nil -} - -func decodeStdlib(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() && rv.Kind() != reflect.Ptr { - return errors.New("value must be addressable or pointer") - } - - // Make sure we are unmarshaling into a pointer. - target := rv - if rv.Kind() != reflect.Ptr { - target = reflect.New(rv.Type()) - } - if err := json.Unmarshal(bz, target.Interface()); err != nil { - return err - } - rv.Set(target.Elem()) - return nil -} - -type interfaceWrapper struct { - Type string `json:"type"` - Value json.RawMessage `json:"value"` -} diff --git a/libs/json/decoder_test.go b/libs/json/decoder_test.go deleted file mode 100644 index 41faa10627..0000000000 --- a/libs/json/decoder_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package json_test - -import ( - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/json" -) - -func TestUnmarshal(t *testing.T) { - i64Nil := (*int64)(nil) - str := "string" - strPtr := &str - structNil := (*Struct)(nil) - i32 := int32(32) - i64 := int64(64) - - testcases := map[string]struct { - json string - value interface{} - err bool - }{ - "bool true": {"true", true, false}, - "bool false": {"false", false, false}, - "float32": {"3.14", float32(3.14), false}, - "float64": {"3.14", float64(3.14), false}, - "int32": {`32`, int32(32), false}, - "int32 string": {`"32"`, int32(32), true}, - "int32 ptr": {`32`, &i32, false}, - "int64": {`"64"`, int64(64), false}, - "int64 noend": {`"64`, int64(64), true}, - "int64 number": {`64`, int64(64), true}, - "int64 ptr": {`"64"`, &i64, false}, - "int64 ptr nil": {`null`, 
i64Nil, false}, - "string": {`"foo"`, "foo", false}, - "string noend": {`"foo`, "foo", true}, - "string ptr": {`"string"`, &str, false}, - "slice byte": {`"AQID"`, []byte{1, 2, 3}, false}, - "slice bytes": {`["AQID"]`, [][]byte{{1, 2, 3}}, false}, - "slice int32": {`[1,2,3]`, []int32{1, 2, 3}, false}, - "slice int64": {`["1","2","3"]`, []int64{1, 2, 3}, false}, - "slice int64 number": {`[1,2,3]`, []int64{1, 2, 3}, true}, - "slice int64 ptr": {`["64"]`, []*int64{&i64}, false}, - "slice int64 empty": {`[]`, []int64(nil), false}, - "slice int64 null": {`null`, []int64(nil), false}, - "array byte": {`"AQID"`, [3]byte{1, 2, 3}, false}, - "array byte large": {`"AQID"`, [4]byte{1, 2, 3, 4}, true}, - "array byte small": {`"AQID"`, [2]byte{1, 2}, true}, - "array int32": {`[1,2,3]`, [3]int32{1, 2, 3}, false}, - "array int64": {`["1","2","3"]`, [3]int64{1, 2, 3}, false}, - "array int64 number": {`[1,2,3]`, [3]int64{1, 2, 3}, true}, - "array int64 large": {`["1","2","3"]`, [4]int64{1, 2, 3, 4}, true}, - "array int64 small": {`["1","2","3"]`, [2]int64{1, 2}, true}, - "map bytes": {`{"b":"AQID"}`, map[string][]byte{"b": {1, 2, 3}}, false}, - "map int32": {`{"a":1,"b":2}`, map[string]int32{"a": 1, "b": 2}, false}, - "map int64": {`{"a":"1","b":"2"}`, map[string]int64{"a": 1, "b": 2}, false}, - "map int64 empty": {`{}`, map[string]int64{}, false}, - "map int64 null": {`null`, map[string]int64(nil), false}, - "map int key": {`{}`, map[int]int{}, true}, - "time": {`"2020-06-03T17:35:30Z"`, time.Date(2020, 6, 3, 17, 35, 30, 0, time.UTC), false}, - "time non-utc": {`"2020-06-03T17:35:30+02:00"`, time.Time{}, true}, - "time nozone": {`"2020-06-03T17:35:30"`, time.Time{}, true}, - "car": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Car{Wheels: 4}, false}, - "car ptr": {`{"type":"vehicle/car","value":{"Wheels":4}}`, &Car{Wheels: 4}, false}, - "car iface": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Vehicle(&Car{Wheels: 4}), false}, - "boat": 
{`{"type":"vehicle/boat","value":{"Sail":true}}`, Boat{Sail: true}, false}, - "boat ptr": {`{"type":"vehicle/boat","value":{"Sail":true}}`, &Boat{Sail: true}, false}, - "boat iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(Boat{Sail: true}), false}, - "boat into car": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Car{}, true}, - "boat into car iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(&Car{}), true}, - "shoes": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, Car{}, true}, - "shoes ptr": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, &Car{}, true}, - "shoes iface": {`{"type":"vehicle/shoes","value":{"Soles":"rubbes"}}`, Vehicle(&Car{}), true}, - "key public": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, false}, - "key wrong": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PrivateKey{1, 2, 3, 4, 5, 6, 7, 8}, true}, - "key into car": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, Vehicle(&Car{}), true}, - "tags": { - `{"name":"name","OmitEmpty":"foo","Hidden":"bar","tags":{"name":"child"}}`, - Tags{JSONName: "name", OmitEmpty: "foo", Tags: &Tags{JSONName: "child"}}, - false, - }, - "tags ptr": { - `{"name":"name","OmitEmpty":"foo","tags":null}`, - &Tags{JSONName: "name", OmitEmpty: "foo"}, - false, - }, - "tags real name": {`{"JSONName":"name"}`, Tags{}, false}, - "struct": { - `{ - "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", - "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", - "Time":"2020-06-02T16:05:13.004346374Z", - "Car":{"Wheels":4}, - "Boat":{"Sail":true}, - "Vehicles":[ - {"type":"vehicle/car","value":{"Wheels":4}}, - {"type":"vehicle/boat","value":{"Sail":true}} - ], - "Child":{ - "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, - "String":"child", "StringPtrPtr":null, "Bytes":null, - "Time":"0001-01-01T00:00:00Z", - "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null - }, - "private": 
"foo", "unknown": "bar" - }`, - Struct{ - Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, - String: "foo", StringPtrPtr: &strPtr, Bytes: []byte{1, 2, 3}, - Time: time.Date(2020, 6, 2, 16, 5, 13, 4346374, time.UTC), - Car: &Car{Wheels: 4}, Boat: Boat{Sail: true}, Vehicles: []Vehicle{ - Vehicle(&Car{Wheels: 4}), - Vehicle(Boat{Sail: true}), - }, - Child: &Struct{Bool: false, String: "child"}, - }, - false, - }, - "struct key into vehicle": {`{"Vehicles":[ - {"type":"vehicle/car","value":{"Wheels":4}}, - {"type":"key/public","value":"MTIzNDU2Nzg="} - ]}`, Struct{}, true}, - "struct ptr null": {`null`, structNil, false}, - "custom value": {`{"Value":"foo"}`, CustomValue{}, false}, - "custom ptr": {`"foo"`, &CustomPtr{Value: "custom"}, false}, - "custom ptr value": {`"foo"`, CustomPtr{Value: "custom"}, false}, - "invalid type": {`"foo"`, Struct{}, true}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - // Create a target variable as a pointer to the zero value of the tc.value type, - // and wrap it in an empty interface. Decode into that interface. - target := reflect.New(reflect.TypeOf(tc.value)).Interface() - err := json.Unmarshal([]byte(tc.json), target) - if tc.err { - require.Error(t, err) - return - } - require.NoError(t, err) - - // Unwrap the target pointer and get the value behind the interface. - actual := reflect.ValueOf(target).Elem().Interface() - assert.Equal(t, tc.value, actual) - }) - } -} diff --git a/libs/json/doc.go b/libs/json/doc.go deleted file mode 100644 index d5ef4047f3..0000000000 --- a/libs/json/doc.go +++ /dev/null @@ -1,99 +0,0 @@ -// Package json provides functions for marshaling and unmarshaling JSON in a format that is -// backwards-compatible with Amino JSON encoding. This mostly differs from encoding/json in -// encoding of integers (64-bit integers are encoded as strings, not numbers), and handling -// of interfaces (wrapped in an interface object with type/value keys). 
-// -// JSON tags (e.g. `json:"name,omitempty"`) are supported in the same way as encoding/json, as is -// custom marshaling overrides via the json.Marshaler and json.Unmarshaler interfaces. -// -// Note that not all JSON emitted by Tendermint is generated by this library; some is generated by -// encoding/json instead, and kept like that for backwards compatibility. -// -// Encoding of numbers uses strings for 64-bit integers (including unspecified ints), to improve -// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit -// precision): -// -// int32(32) // Output: 32 -// uint32(32) // Output: 32 -// int64(64) // Output: "64" -// uint64(64) // Output: "64" -// int(64) // Output: "64" -// uint(64) // Output: "64" -// -// Encoding of other scalars follows encoding/json: -// -// nil // Output: null -// true // Output: true -// "foo" // Output: "foo" -// "" // Output: "" -// -// Slices and arrays are encoded as encoding/json, including base64-encoding of byte slices -// with additional base64-encoding of byte arrays as well: -// -// []int64(nil) // Output: null -// []int64{} // Output: [] -// []int64{1, 2, 3} // Output: ["1", "2", "3"] -// []int32{1, 2, 3} // Output: [1, 2, 3] -// []byte{1, 2, 3} // Output: "AQID" -// [3]int64{1, 2, 3} // Output: ["1", "2", "3"] -// [3]byte{1, 2, 3} // Output: "AQID" -// -// Maps are encoded as encoding/json, but only strings are allowed as map keys (nil maps are not -// emitted as null, to retain Amino backwards-compatibility): -// -// map[string]int64(nil) // Output: {} -// map[string]int64{} // Output: {} -// map[string]int64{"a":1,"b":2} // Output: {"a":"1","b":"2"} -// map[string]int32{"a":1,"b":2} // Output: {"a":1,"b":2} -// map[bool]int{true:1} // Errors -// -// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero -// times emitted as "0001-01-01T00:00:00Z" as with encoding/json): -// -// time.Date(2020, 6, 8, 16, 21, 28, 123, 
time.FixedZone("UTC+2", 2*60*60)) -// // Output: "2020-06-08T14:21:28.000000123Z" -// time.Time{} // Output: "0001-01-01T00:00:00Z" -// (*time.Time)(nil) // Output: null -// -// Structs are encoded as encoding/json, supporting JSON tags and ignoring private fields: -// -// type Struct struct{ -// Name string -// Value int32 `json:"value,omitempty"` -// private bool -// } -// -// Struct{Name: "foo", Value: 7, private: true} // Output: {"Name":"foo","value":7} -// Struct{} // Output: {"Name":""} -// -// Registered types are encoded with type wrapper, regardless of whether they are given as interface -// or bare struct, but inside structs they are only emitted with type wrapper for interface fields -// (this follows Amino behavior): -// -// type Vehicle interface { -// Drive() error -// } -// -// type Car struct { -// Wheels int8 -// } -// -// func (c *Car) Drive() error { return nil } -// -// RegisterType(&Car{}, "vehicle/car") -// -// Car{Wheels: 4} // Output: {"type":"vehicle/car","value":{"Wheels":4}} -// &Car{Wheels: 4} // Output: {"type":"vehicle/car","value":{"Wheels":4}} -// (*Car)(nil) // Output: null -// Vehicle(Car{Wheels: 4}) // Output: {"type":"vehicle/car","value":{"Wheels":4}} -// Vehicle(nil) // Output: null -// -// type Struct struct { -// Car *Car -// Vehicle Vehicle -// } -// -// Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} -// // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}} -// -package json diff --git a/libs/json/encoder.go b/libs/json/encoder.go deleted file mode 100644 index 11990e2af6..0000000000 --- a/libs/json/encoder.go +++ /dev/null @@ -1,254 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "reflect" - "strconv" - "time" -) - -var ( - timeType = reflect.TypeOf(time.Time{}) - jsonMarshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() - jsonUnmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() -) - -// Marshal marshals the value as JSON, 
using Amino-compatible JSON encoding (strings for -// 64-bit numbers, and type wrappers for registered types). -func Marshal(v interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - err := encode(buf, v) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalIndent marshals the value as JSON, using the given prefix and indentation. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - bz, err := Marshal(v) - if err != nil { - return nil, err - } - buf := new(bytes.Buffer) - err = json.Indent(buf, bz, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func encode(w io.Writer, v interface{}) error { - // Bare nil values can't be reflected, so we must handle them here. - if v == nil { - return writeStr(w, "null") - } - rv := reflect.ValueOf(v) - - // If this is a registered type, defer to interface encoder regardless of whether the input is - // an interface or a bare value. This retains Amino's behavior, but is inconsistent with - // behavior in structs where an interface field will get the type wrapper while a bare value - // field will not. - if typeRegistry.name(rv.Type()) != "" { - return encodeReflectInterface(w, rv) - } - - return encodeReflect(w, rv) -} - -func encodeReflect(w io.Writer, rv reflect.Value) error { - if !rv.IsValid() { - return errors.New("invalid reflect value") - } - - // Recursively dereference if pointer. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return writeStr(w, "null") - } - rv = rv.Elem() - } - - // Convert times to UTC. - if rv.Type() == timeType { - rv = reflect.ValueOf(rv.Interface().(time.Time).Round(0).UTC()) - } - - // If the value implements json.Marshaler, defer to stdlib directly. Since we've already - // dereferenced, we try implementations with both value receiver and pointer receiver. We must - // do this after the time normalization above, and thus after dereferencing. 
- if rv.Type().Implements(jsonMarshalerType) { - return encodeStdlib(w, rv.Interface()) - } else if rv.CanAddr() && rv.Addr().Type().Implements(jsonMarshalerType) { - return encodeStdlib(w, rv.Addr().Interface()) - } - - switch rv.Type().Kind() { - // Complex types must be recursively encoded. - case reflect.Interface: - return encodeReflectInterface(w, rv) - - case reflect.Array, reflect.Slice: - return encodeReflectList(w, rv) - - case reflect.Map: - return encodeReflectMap(w, rv) - - case reflect.Struct: - return encodeReflectStruct(w, rv) - - // 64-bit integers are emitted as strings, to avoid precision problems with e.g. - // Javascript which uses 64-bit floats (having 53-bit precision). - case reflect.Int64, reflect.Int: - return writeStr(w, `"`+strconv.FormatInt(rv.Int(), 10)+`"`) - - case reflect.Uint64, reflect.Uint: - return writeStr(w, `"`+strconv.FormatUint(rv.Uint(), 10)+`"`) - - // For everything else, defer to the stdlib encoding/json encoder - default: - return encodeStdlib(w, rv.Interface()) - } -} - -func encodeReflectList(w io.Writer, rv reflect.Value) error { - // Emit nil slices as null. - if rv.Kind() == reflect.Slice && rv.IsNil() { - return writeStr(w, "null") - } - - // Encode byte slices as base64 with the stdlib encoder. - if rv.Type().Elem().Kind() == reflect.Uint8 { - // Stdlib does not base64-encode byte arrays, only slices, so we copy to slice. - if rv.Type().Kind() == reflect.Array { - slice := reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), rv.Len(), rv.Len()) - reflect.Copy(slice, rv) - rv = slice - } - return encodeStdlib(w, rv.Interface()) - } - - // Anything else we recursively encode ourselves. 
- length := rv.Len() - if err := writeStr(w, "["); err != nil { - return err - } - for i := 0; i < length; i++ { - if err := encodeReflect(w, rv.Index(i)); err != nil { - return err - } - if i < length-1 { - if err := writeStr(w, ","); err != nil { - return err - } - } - } - return writeStr(w, "]") -} - -func encodeReflectMap(w io.Writer, rv reflect.Value) error { - if rv.Type().Key().Kind() != reflect.String { - return errors.New("map key must be string") - } - - // nil maps are not emitted as nil, to retain Amino compatibility. - - if err := writeStr(w, "{"); err != nil { - return err - } - writeComma := false - for _, keyrv := range rv.MapKeys() { - if writeComma { - if err := writeStr(w, ","); err != nil { - return err - } - } - if err := encodeStdlib(w, keyrv.Interface()); err != nil { - return err - } - if err := writeStr(w, ":"); err != nil { - return err - } - if err := encodeReflect(w, rv.MapIndex(keyrv)); err != nil { - return err - } - writeComma = true - } - return writeStr(w, "}") -} - -func encodeReflectStruct(w io.Writer, rv reflect.Value) error { - sInfo := makeStructInfo(rv.Type()) - if err := writeStr(w, "{"); err != nil { - return err - } - writeComma := false - for i, fInfo := range sInfo.fields { - frv := rv.Field(i) - if fInfo.hidden || (fInfo.omitEmpty && frv.IsZero()) { - continue - } - - if writeComma { - if err := writeStr(w, ","); err != nil { - return err - } - } - if err := encodeStdlib(w, fInfo.jsonName); err != nil { - return err - } - if err := writeStr(w, ":"); err != nil { - return err - } - if err := encodeReflect(w, frv); err != nil { - return err - } - writeComma = true - } - return writeStr(w, "}") -} - -func encodeReflectInterface(w io.Writer, rv reflect.Value) error { - // Get concrete value and dereference pointers. 
- for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { - if rv.IsNil() { - return writeStr(w, "null") - } - rv = rv.Elem() - } - - // Look up the name of the concrete type - name := typeRegistry.name(rv.Type()) - if name == "" { - return fmt.Errorf("cannot encode unregistered type %v", rv.Type()) - } - - // Write value wrapped in interface envelope - if err := writeStr(w, fmt.Sprintf(`{"type":%q,"value":`, name)); err != nil { - return err - } - if err := encodeReflect(w, rv); err != nil { - return err - } - return writeStr(w, "}") -} - -func encodeStdlib(w io.Writer, v interface{}) error { - // Doesn't stream the output because that adds a newline, as per: - // https://golang.org/pkg/encoding/json/#Encoder.Encode - blob, err := json.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(blob) - return err -} - -func writeStr(w io.Writer, s string) error { - _, err := w.Write([]byte(s)) - return err -} diff --git a/libs/json/encoder_test.go b/libs/json/encoder_test.go deleted file mode 100644 index 88eb56f857..0000000000 --- a/libs/json/encoder_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package json_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/json" -) - -func TestMarshal(t *testing.T) { - s := "string" - sPtr := &s - i64 := int64(64) - ti := time.Date(2020, 6, 2, 18, 5, 13, 4346374, time.FixedZone("UTC+2", 2*60*60)) - car := &Car{Wheels: 4} - boat := Boat{Sail: true} - - testcases := map[string]struct { - value interface{} - output string - }{ - "nil": {nil, `null`}, - "string": {"foo", `"foo"`}, - "float32": {float32(3.14), `3.14`}, - "float32 neg": {float32(-3.14), `-3.14`}, - "float64": {float64(3.14), `3.14`}, - "float64 neg": {float64(-3.14), `-3.14`}, - "int32": {int32(32), `32`}, - "int64": {int64(64), `"64"`}, - "int64 neg": {int64(-64), `"-64"`}, - "int64 ptr": {&i64, `"64"`}, - "uint64": {uint64(64), `"64"`}, - "time": 
{ti, `"2020-06-02T16:05:13.004346374Z"`}, - "time empty": {time.Time{}, `"0001-01-01T00:00:00Z"`}, - "time ptr": {&ti, `"2020-06-02T16:05:13.004346374Z"`}, - "customptr": {CustomPtr{Value: "x"}, `{"Value":"x"}`}, // same as encoding/json - "customptr ptr": {&CustomPtr{Value: "x"}, `"custom"`}, - "customvalue": {CustomValue{Value: "x"}, `"custom"`}, - "customvalue ptr": {&CustomValue{Value: "x"}, `"custom"`}, - "slice nil": {[]int(nil), `null`}, - "slice empty": {[]int{}, `[]`}, - "slice bytes": {[]byte{1, 2, 3}, `"AQID"`}, - "slice int64": {[]int64{1, 2, 3}, `["1","2","3"]`}, - "slice int64 ptr": {[]*int64{&i64, nil}, `["64",null]`}, - "array bytes": {[3]byte{1, 2, 3}, `"AQID"`}, - "array int64": {[3]int64{1, 2, 3}, `["1","2","3"]`}, - "map nil": {map[string]int64(nil), `{}`}, // retain Amino compatibility - "map empty": {map[string]int64{}, `{}`}, - "map int64": {map[string]int64{"a": 1, "b": 2, "c": 3}, `{"a":"1","b":"2","c":"3"}`}, - "car": {car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, - "car value": {*car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, - "car iface": {Vehicle(car), `{"type":"vehicle/car","value":{"Wheels":4}}`}, - "car nil": {(*Car)(nil), `null`}, - "boat": {boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, - "boat ptr": {&boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, - "boat iface": {Vehicle(boat), `{"type":"vehicle/boat","value":{"Sail":true}}`}, - "key public": {PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, `{"type":"key/public","value":"AQIDBAUGBwg="}`}, - "tags": { - Tags{JSONName: "name", OmitEmpty: "foo", Hidden: "bar", Tags: &Tags{JSONName: "child"}}, - `{"name":"name","OmitEmpty":"foo","tags":{"name":"child"}}`, - }, - "tags empty": {Tags{}, `{"name":""}`}, - // The encoding of the Car and Boat fields do not have type wrappers, even though they get - // type wrappers when encoded directly (see "car" and "boat" tests). This is to retain the - // same behavior as Amino. 
If the field was a Vehicle interface instead, it would get - // type wrappers, as seen in the Vehicles field. - "struct": { - Struct{ - Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, - String: "foo", StringPtrPtr: &sPtr, Bytes: []byte{1, 2, 3}, - Time: ti, Car: car, Boat: boat, Vehicles: []Vehicle{car, boat}, - Child: &Struct{Bool: false, String: "child"}, private: "private", - }, - `{ - "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", - "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", - "Time":"2020-06-02T16:05:13.004346374Z", - "Car":{"Wheels":4}, - "Boat":{"Sail":true}, - "Vehicles":[ - {"type":"vehicle/car","value":{"Wheels":4}}, - {"type":"vehicle/boat","value":{"Sail":true}} - ], - "Child":{ - "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, - "String":"child", "StringPtrPtr":null, "Bytes":null, - "Time":"0001-01-01T00:00:00Z", - "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null - } - }`, - }, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - bz, err := json.Marshal(tc.value) - require.NoError(t, err) - assert.JSONEq(t, tc.output, string(bz)) - }) - } -} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go deleted file mode 100644 index ccb3c00388..0000000000 --- a/libs/json/helpers_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package json_test - -import ( - "time" - - "github.com/tendermint/tendermint/libs/json" -) - -// Register Car, an instance of the Vehicle interface. -func init() { - json.RegisterType(&Car{}, "vehicle/car") - json.RegisterType(Boat{}, "vehicle/boat") - json.RegisterType(PublicKey{}, "key/public") - json.RegisterType(PrivateKey{}, "key/private") -} - -type Vehicle interface { - Drive() error -} - -// Car is a pointer implementation of Vehicle. -type Car struct { - Wheels int32 -} - -func (c *Car) Drive() error { return nil } - -// Boat is a value implementation of Vehicle. 
-type Boat struct { - Sail bool -} - -func (b Boat) Drive() error { return nil } - -// These are public and private encryption keys. -type PublicKey [8]byte -type PrivateKey [8]byte - -// Custom has custom marshalers and unmarshalers, taking pointer receivers. -type CustomPtr struct { - Value string -} - -func (c *CustomPtr) MarshalJSON() ([]byte, error) { - return []byte("\"custom\""), nil -} - -func (c *CustomPtr) UnmarshalJSON(bz []byte) error { - c.Value = "custom" - return nil -} - -// CustomValue has custom marshalers and unmarshalers, taking value receivers (which usually doesn't -// make much sense since the unmarshaler can't change anything). -type CustomValue struct { - Value string -} - -func (c CustomValue) MarshalJSON() ([]byte, error) { - return []byte("\"custom\""), nil -} - -func (c CustomValue) UnmarshalJSON(bz []byte) error { - return nil -} - -// Tags tests JSON tags. -type Tags struct { - JSONName string `json:"name"` - OmitEmpty string `json:",omitempty"` - Hidden string `json:"-"` - Tags *Tags `json:"tags,omitempty"` -} - -// Struct tests structs with lots of contents. -type Struct struct { - Bool bool - Float64 float64 - Int32 int32 - Int64 int64 - Int64Ptr *int64 - String string - StringPtrPtr **string - Bytes []byte - Time time.Time - Car *Car - Boat Boat - Vehicles []Vehicle - Child *Struct - private string -} diff --git a/libs/json/structs.go b/libs/json/structs.go deleted file mode 100644 index b9521114af..0000000000 --- a/libs/json/structs.go +++ /dev/null @@ -1,88 +0,0 @@ -package json - -import ( - "fmt" - "reflect" - "strings" - "unicode" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -var ( - // cache caches struct info. - cache = newStructInfoCache() -) - -// structCache is a cache of struct info. 
-type structInfoCache struct { - tmsync.RWMutex - structInfos map[reflect.Type]*structInfo -} - -func newStructInfoCache() *structInfoCache { - return &structInfoCache{ - structInfos: make(map[reflect.Type]*structInfo), - } -} - -func (c *structInfoCache) get(rt reflect.Type) *structInfo { - c.RLock() - defer c.RUnlock() - return c.structInfos[rt] -} - -func (c *structInfoCache) set(rt reflect.Type, sInfo *structInfo) { - c.Lock() - defer c.Unlock() - c.structInfos[rt] = sInfo -} - -// structInfo contains JSON info for a struct. -type structInfo struct { - fields []*fieldInfo -} - -// fieldInfo contains JSON info for a struct field. -type fieldInfo struct { - jsonName string - omitEmpty bool - hidden bool -} - -// makeStructInfo generates structInfo for a struct as a reflect.Value. -func makeStructInfo(rt reflect.Type) *structInfo { - if rt.Kind() != reflect.Struct { - panic(fmt.Sprintf("can't make struct info for non-struct value %v", rt)) - } - if sInfo := cache.get(rt); sInfo != nil { - return sInfo - } - fields := make([]*fieldInfo, 0, rt.NumField()) - for i := 0; i < cap(fields); i++ { - frt := rt.Field(i) - fInfo := &fieldInfo{ - jsonName: frt.Name, - omitEmpty: false, - hidden: frt.Name == "" || !unicode.IsUpper(rune(frt.Name[0])), - } - o := frt.Tag.Get("json") - if o == "-" { - fInfo.hidden = true - } else if o != "" { - opts := strings.Split(o, ",") - if opts[0] != "" { - fInfo.jsonName = opts[0] - } - for _, o := range opts[1:] { - if o == "omitempty" { - fInfo.omitEmpty = true - } - } - } - fields = append(fields, fInfo) - } - sInfo := &structInfo{fields: fields} - cache.set(rt, sInfo) - return sInfo -} diff --git a/libs/json/types.go b/libs/json/types.go deleted file mode 100644 index 9f21e81eb8..0000000000 --- a/libs/json/types.go +++ /dev/null @@ -1,109 +0,0 @@ -package json - -import ( - "errors" - "fmt" - "reflect" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -var ( - // typeRegistry contains globally registered types for 
JSON encoding/decoding. - typeRegistry = newTypes() -) - -// RegisterType registers a type for Amino-compatible interface encoding in the global type -// registry. These types will be encoded with a type wrapper `{"type":"","value":}` -// regardless of which interface they are wrapped in (if any). If the type is a pointer, it will -// still be valid both for value and pointer types, but decoding into an interface will generate -// the a value or pointer based on the registered type. -// -// Should only be called in init() functions, as it panics on error. -func RegisterType(_type interface{}, name string) { - if _type == nil { - panic("cannot register nil type") - } - err := typeRegistry.register(name, reflect.ValueOf(_type).Type()) - if err != nil { - panic(err) - } -} - -// typeInfo contains type information. -type typeInfo struct { - name string - rt reflect.Type - returnPtr bool -} - -// types is a type registry. It is safe for concurrent use. -type types struct { - tmsync.RWMutex - byType map[reflect.Type]*typeInfo - byName map[string]*typeInfo -} - -// newTypes creates a new type registry. -func newTypes() types { - return types{ - byType: map[reflect.Type]*typeInfo{}, - byName: map[string]*typeInfo{}, - } -} - -// registers the given type with the given name. The name and type must not be registered already. -func (t *types) register(name string, rt reflect.Type) error { - if name == "" { - return errors.New("name cannot be empty") - } - // If this is a pointer type, we recursively resolve until we get a bare type, but register that - // we should return pointers. 
- returnPtr := false - for rt.Kind() == reflect.Ptr { - returnPtr = true - rt = rt.Elem() - } - tInfo := &typeInfo{ - name: name, - rt: rt, - returnPtr: returnPtr, - } - - t.Lock() - defer t.Unlock() - if _, ok := t.byName[tInfo.name]; ok { - return fmt.Errorf("a type with name %q is already registered", name) - } - if _, ok := t.byType[tInfo.rt]; ok { - return fmt.Errorf("the type %v is already registered", rt) - } - t.byName[name] = tInfo - t.byType[rt] = tInfo - return nil -} - -// lookup looks up a type from a name, or nil if not registered. -func (t *types) lookup(name string) (reflect.Type, bool) { - t.RLock() - defer t.RUnlock() - tInfo := t.byName[name] - if tInfo == nil { - return nil, false - } - return tInfo.rt, tInfo.returnPtr -} - -// name looks up the name of a type, or empty if not registered. Unwraps pointers as necessary. -func (t *types) name(rt reflect.Type) string { - for rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - t.RLock() - defer t.RUnlock() - tInfo := t.byType[rt] - if tInfo == nil { - return "" - } - return tInfo.name -} diff --git a/libs/log/default.go b/libs/log/default.go index ca48fcd724..47800a3d9c 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -14,8 +14,6 @@ var _ Logger = (*defaultLogger)(nil) type defaultLogger struct { zerolog.Logger - - trace bool } // NewDefaultLogger returns a default logger that can be used within Tendermint @@ -26,7 +24,7 @@ type defaultLogger struct { // Since zerolog supports typed structured logging and it is difficult to reflect // that in a generic interface, all logging methods accept a series of key/value // pair tuples, where the key must be a string. 
-func NewDefaultLogger(format, level string, trace bool) (Logger, error) { +func NewDefaultLogger(format, level string) (Logger, error) { var logWriter io.Writer switch strings.ToLower(format) { case LogFormatPlain, LogFormatText: @@ -59,14 +57,13 @@ func NewDefaultLogger(format, level string, trace bool) (Logger, error) { return defaultLogger{ Logger: zerolog.New(logWriter).Level(logLevel).With().Timestamp().Logger(), - trace: trace, }, nil } // MustNewDefaultLogger delegates a call NewDefaultLogger where it panics on // error. -func MustNewDefaultLogger(format, level string, trace bool) Logger { - logger, err := NewDefaultLogger(format, level, trace) +func MustNewDefaultLogger(format, level string) Logger { + logger, err := NewDefaultLogger(format, level) if err != nil { panic(err) } @@ -80,9 +77,6 @@ func (l defaultLogger) Info(msg string, keyVals ...interface{}) { func (l defaultLogger) Error(msg string, keyVals ...interface{}) { e := l.Logger.Error() - if l.trace { - e = e.Stack() - } e.Fields(getLogFields(keyVals...)).Msg(msg) } @@ -94,7 +88,6 @@ func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { func (l defaultLogger) With(keyVals ...interface{}) Logger { return defaultLogger{ Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(), - trace: l.trace, } } diff --git a/libs/log/default_test.go b/libs/log/default_test.go index c66508f048..5e8e188101 100644 --- a/libs/log/default_test.go +++ b/libs/log/default_test.go @@ -34,12 +34,9 @@ func TestNewDefaultLogger(t *testing.T) { tc := tc t.Run(name, func(t *testing.T) { - _, err := log.NewDefaultLogger(tc.format, tc.level, false) + _, err := log.NewDefaultLogger(tc.format, tc.level) if tc.expectErr { require.Error(t, err) - require.Panics(t, func() { - _ = log.MustNewDefaultLogger(tc.format, tc.level, false) - }) } else { require.NoError(t, err) } diff --git a/libs/log/nop.go b/libs/log/nop.go index 4b3bfb1967..e49540c10e 100644 --- a/libs/log/nop.go +++ b/libs/log/nop.go @@ -7,6 +7,5 @@ 
import ( func NewNopLogger() Logger { return defaultLogger{ Logger: zerolog.Nop(), - trace: false, } } diff --git a/libs/log/testing.go b/libs/log/testing.go index 9894f6a507..649ebab40d 100644 --- a/libs/log/testing.go +++ b/libs/log/testing.go @@ -1,41 +1,60 @@ package log import ( - "io" - "os" - "sync" "testing" -) -var ( - // reuse the same logger across all tests - testingLoggerMtx = sync.Mutex{} - testingLogger Logger + "github.com/rs/zerolog" ) -// TestingLogger returns a Logger which writes to STDOUT if test(s) are being -// run with the verbose (-v) flag, NopLogger otherwise. -// -// NOTE: -// - A call to NewTestingLogger() must be made inside a test (not in the init func) -// because verbose flag only set at the time of testing. +// TestingLogger was a legacy constructor that wrote logging output to +// standardoutput when in verbose mode, and no-op'ed test logs +// otherwise. Now it always no-ops, but if you need to see logs from +// tests, you can replace this call with `NewTestingLogger` +// constructor. func TestingLogger() Logger { - return TestingLoggerWithOutput(os.Stdout) + return NewNopLogger() } -func TestingLoggerWithOutput(w io.Writer) Logger { - testingLoggerMtx.Lock() - defer testingLoggerMtx.Unlock() +type testingWriter struct { + t testing.TB +} - if testingLogger != nil { - return testingLogger - } +func (tw testingWriter) Write(in []byte) (int, error) { + tw.t.Log(string(in)) + return len(in), nil +} +// NewTestingLogger converts a testing.T into a logging interface to +// make test failures and verbose provide better feedback associated +// with test failures. This logging instance is safe for use from +// multiple threads, but in general you should create one of these +// loggers ONCE for each *testing.T instance that you interact with. 
+// +// By default it collects only ERROR messages, or DEBUG messages in +// verbose mode, and relies on the underlying behavior of +// testing.T.Log() +// +// Users should be careful to ensure that no calls to this logger are +// made in goroutines that are running after (which, by the rules of +// testing.TB will panic.) +func NewTestingLogger(t testing.TB) Logger { + level := LogLevelError if testing.Verbose() { - testingLogger = MustNewDefaultLogger(LogFormatText, LogLevelDebug, true) - } else { - testingLogger = NewNopLogger() + level = LogLevelDebug + } + + return NewTestingLoggerWithLevel(t, level) +} + +// NewTestingLoggerWithLevel creates a testing logger instance at a +// specific level that wraps the behavior of testing.T.Log(). +func NewTestingLoggerWithLevel(t testing.TB, level string) Logger { + logLevel, err := zerolog.ParseLevel(level) + if err != nil { + t.Fatalf("failed to parse log level (%s): %v", level, err) } - return testingLogger + return defaultLogger{ + Logger: zerolog.New(newSyncWriter(testingWriter{t})).Level(logLevel), + } } diff --git a/libs/os/os.go b/libs/os/os.go index f4b0f1810a..3d74c22082 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -5,35 +5,8 @@ import ( "fmt" "io" "os" - "os/signal" - "syscall" ) -type logger interface { - Info(msg string, keyvals ...interface{}) -} - -// TrapSignal catches SIGTERM and SIGINT, executes the cleanup function, -// and exits with code 0. -func TrapSignal(logger logger, cb func()) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - - go func() { - sig := <-c - logger.Info(fmt.Sprintf("captured %v, exiting...", sig)) - if cb != nil { - cb() - } - os.Exit(0) - }() -} - -func Exit(s string) { - fmt.Printf(s + "\n") - os.Exit(1) -} - // EnsureDir ensures the given directory exists, creating it if necessary. // Errors if the path already exists as a non-directory. 
func EnsureDir(dir string, mode os.FileMode) error { diff --git a/libs/os/os_test.go b/libs/os/os_test.go index 3a31de04a0..fe503f9214 100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -3,20 +3,16 @@ package os_test import ( "bytes" "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" - "syscall" "testing" - "time" "github.com/stretchr/testify/require" tmos "github.com/tendermint/tendermint/libs/os" ) func TestCopyFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp("", "example") if err != nil { t.Fatal(err) } @@ -33,7 +29,7 @@ func TestCopyFile(t *testing.T) { if _, err := os.Stat(copyfile); os.IsNotExist(err) { t.Fatal("copy should exist") } - data, err := ioutil.ReadFile(copyfile) + data, err := os.ReadFile(copyfile) if err != nil { t.Fatal(err) } @@ -43,34 +39,8 @@ func TestCopyFile(t *testing.T) { os.Remove(copyfile) } -func TestTrapSignal(t *testing.T) { - if os.Getenv("TM_TRAP_SIGNAL_TEST") == "1" { - t.Log("inside test process") - killer() - return - } - - cmd, _, mockStderr := newTestProgram(t, "TM_TRAP_SIGNAL_TEST") - - err := cmd.Run() - if err == nil { - wantStderr := "exiting" - if mockStderr.String() != wantStderr { - t.Fatalf("stderr: want %q, got %q", wantStderr, mockStderr.String()) - } - - return - } - - if e, ok := err.(*exec.ExitError); ok && !e.Success() { - t.Fatalf("wrong exit code, want 0, got %d", e.ExitCode()) - } - - t.Fatal("this error should not be triggered") -} - func TestEnsureDir(t *testing.T) { - tmp, err := ioutil.TempDir("", "ensure-dir") + tmp, err := os.MkdirTemp("", "ensure-dir") require.NoError(t, err) defer os.RemoveAll(tmp) @@ -84,7 +54,7 @@ func TestEnsureDir(t *testing.T) { require.NoError(t, err) // Should fail on file. 
- err = ioutil.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) require.NoError(t, err) err = tmos.EnsureDir(filepath.Join(tmp, "file"), 0755) require.Error(t, err) @@ -102,45 +72,11 @@ func TestEnsureDir(t *testing.T) { require.Error(t, err) } -type mockLogger struct{} - -func (ml mockLogger) Info(msg string, keyvals ...interface{}) {} - -func killer() { - logger := mockLogger{} - - tmos.TrapSignal(logger, func() { _, _ = fmt.Fprintf(os.Stderr, "exiting") }) - time.Sleep(1 * time.Second) - - p, err := os.FindProcess(os.Getpid()) - if err != nil { - panic(err) - } - - if err := p.Signal(syscall.SIGTERM); err != nil { - panic(err) - } - - time.Sleep(1 * time.Second) -} - -func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *bytes.Buffer, stderr *bytes.Buffer) { - t.Helper() - - cmd = exec.Command(os.Args[0], "-test.run="+t.Name()) - stdout, stderr = bytes.NewBufferString(""), bytes.NewBufferString("") - cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", environVar)) - cmd.Stdout = stdout - cmd.Stderr = stderr - - return -} - // Ensure that using CopyFile does not truncate the destination file before // the origin is positively a non-directory and that it is ready for copying. // See https://github.com/tendermint/tendermint/issues/6427 func TestTrickedTruncation(t *testing.T) { - tmpDir, err := ioutil.TempDir(os.TempDir(), "pwn_truncate") + tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate") if err != nil { t.Fatal(err) } @@ -148,12 +84,12 @@ func TestTrickedTruncation(t *testing.T) { originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") - if err := ioutil.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { t.Fatal(err) } // 1. Sanity check. 
- readWAL, err := ioutil.ReadFile(originalWALPath) + readWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } @@ -168,7 +104,7 @@ func TestTrickedTruncation(t *testing.T) { } // 3. Check the WAL's content - reReadWAL, err := ioutil.ReadFile(originalWALPath) + reReadWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go deleted file mode 100644 index fd4a94382a..0000000000 --- a/libs/pubsub/example_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package pubsub_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestExample(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - - subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'")) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "abci.account", - Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}}, - }, - } - err = s.PublishWithEvents(ctx, "Tombstone", events) - require.NoError(t, err) - - assertReceive(t, "Tombstone", subscription.Out()) -} diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go deleted file mode 100644 index 68d1ec9412..0000000000 --- a/libs/pubsub/pubsub.go +++ /dev/null @@ -1,527 +0,0 @@ -// Package pubsub implements a pub-sub model with a single publisher (Server) -// and multiple subscribers (clients). -// -// Though you can have multiple publishers by sharing a pointer to a server or -// by giving the same channel to each publisher and publishing messages from -// that channel (fan-in). 
-// -// Clients subscribe for messages, which could be of any type, using a query. -// When some message is published, we match it with all queries. If there is a -// match, this message will be pushed to all clients, subscribed to that query. -// See query subpackage for our implementation. -// -// Example: -// -// q, err := query.New("account.name='John'") -// if err != nil { -// return err -// } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) -// defer cancel() -// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) -// if err != nil { -// return err -// } -// -// for { -// select { -// case msg <- subscription.Out(): -// // handle msg.Data() and msg.Events() -// case <-subscription.Canceled(): -// return subscription.Err() -// } -// } -// -package pubsub - -import ( - "context" - "errors" - "fmt" - - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/service" -) - -type operation int - -const ( - sub operation = iota - pub - unsub - shutdown -) - -var ( - // ErrSubscriptionNotFound is returned when a client tries to unsubscribe - // from not existing subscription. - ErrSubscriptionNotFound = errors.New("subscription not found") - - // ErrAlreadySubscribed is returned when a client tries to subscribe twice or - // more using the same query. - ErrAlreadySubscribed = errors.New("already subscribed") -) - -// Query defines an interface for a query to be used for subscribing. A query -// matches against a map of events. Each key in this map is a composite of the -// even type and an attribute key (e.g. "{eventType}.{eventAttrKey}") and the -// values are the event values that are contained under that relationship. This -// allows event types to repeat themselves with the same set of keys and -// different values. 
-type Query interface { - Matches(events []types.Event) (bool, error) - String() string -} - -type UnsubscribeArgs struct { - ID string - Subscriber string - Query Query -} - -func (args UnsubscribeArgs) Validate() error { - if args.Subscriber == "" { - return errors.New("must specify a subscriber") - } - - if args.ID == "" && args.Query == nil { - return fmt.Errorf("subscription is not fully defined [subscriber=%q]", args.Subscriber) - } - - return nil -} - -type cmd struct { - op operation - - // subscribe, unsubscribe - query Query - subscription *Subscription - clientID string - - // publish - msg interface{} - events []types.Event -} - -// Server allows clients to subscribe/unsubscribe for messages, publishing -// messages with or without events, and manages internal state. -type Server struct { - service.BaseService - - cmds chan cmd - cmdsCap int - - // check if we have subscription before - // subscribing or unsubscribing - mtx tmsync.RWMutex - - // subscriber -> [query->id (string) OR id->query (string))], - // track connections both by ID (new) and query (legacy) to - // avoid breaking the interface. - subscriptions map[string]map[string]string -} - -// Option sets a parameter for the server. -type Option func(*Server) - -// NewServer returns a new server. See the commentary on the Option functions -// for a detailed description of how to configure buffering. If no options are -// provided, the resulting server's queue is unbuffered. -func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]string), - } - s.BaseService = *service.NewBaseService(nil, "PubSub", s) - - for _, option := range options { - option(s) - } - - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) - - return s -} - -// BufferCapacity allows you to specify capacity for the internal server's -// queue. 
Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). -func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap - } - } -} - -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} - -// Subscribe creates a subscription for the given client. -// -// An error will be returned to the caller if the context is canceled or if -// subscription already exist for pair clientID and query. -// -// outCapacity can be used to set a capacity for Subscription#Out channel (1 by -// default). Panics if outCapacity is less than or equal to zero. If you want -// an unbuffered channel, use SubscribeUnbuffered. -func (s *Server) Subscribe( - ctx context.Context, - clientID string, - query Query, - outCapacity ...int) (*Subscription, error) { - outCap := 1 - if len(outCapacity) > 0 { - if outCapacity[0] <= 0 { - panic("Negative or zero capacity. Use SubscribeUnbuffered if you want an unbuffered channel") - } - outCap = outCapacity[0] - } - - return s.subscribe(ctx, clientID, query, outCap) -} - -// SubscribeUnbuffered does the same as Subscribe, except it returns a -// subscription with unbuffered channel. Use with caution as it can freeze the -// server. 
-func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (*Subscription, error) { - return s.subscribe(ctx, clientID, query, 0) -} - -func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if ok { - return nil, ErrAlreadySubscribed - } - - subscription := NewSubscription(outCapacity) - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]string) - } - s.subscriptions[clientID][query.String()] = subscription.id - s.subscriptions[clientID][subscription.id] = query.String() - s.mtx.Unlock() - return subscription, nil - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.Quit(): - return nil, nil - } -} - -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. 
-func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { - if err := args.Validate(); err != nil { - return err - } - var qs string - - if args.Query != nil { - qs = args.Query.String() - } - - clientSubscriptions, err := func() (map[string]string, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] - - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return nil, err - } - } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] - } - - if !ok { - return nil, ErrSubscriptionNotFound - } - - return clientSubscriptions, nil - }() - - if err != nil { - return err - } - - select { - case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(clientSubscriptions, args.ID) - delete(clientSubscriptions, qs) - - if len(clientSubscriptions) == 0 { - delete(s.subscriptions, args.Subscriber) - } - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. -func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } - - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(s.subscriptions, clientID) - - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// NumClients returns the number of clients. 
-func (s *Server) NumClients() int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions) -} - -// NumClientSubscriptions returns the number of subscriptions the client has. -func (s *Server) NumClientSubscriptions(clientID string) int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions[clientID]) / 2 -} - -// Publish publishes the given message. An error will be returned to the caller -// if the context is canceled. -func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithEvents(ctx, msg, []types.Event{}) -} - -// PublishWithEvents publishes the given message with the set of events. The set -// is matched with clients queries. If there is a match, the message is sent to -// the client. -func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []types.Event) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, events: events}: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// OnStop implements Service.OnStop by shutting down the server. -func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} - -// NOTE: not goroutine safe -type state struct { - // query string -> client -> subscription - subscriptions map[string]map[string]*Subscription - // query string -> queryPlusRefCount - queries map[string]*queryPlusRefCount -} - -// queryPlusRefCount holds a pointer to a query and reference counter. When -// refCount is zero, query will be removed. -type queryPlusRefCount struct { - q Query - refCount int -} - -// OnStart implements Service.OnStart by starting the server. 
-func (s *Server) OnStart() error { - go s.loop(state{ - subscriptions: make(map[string]map[string]*Subscription), - queries: make(map[string]*queryPlusRefCount), - }) - return nil -} - -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} - -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query.String(), cmd.subscription.id, ErrUnsubscribed) - } else { - state.removeClient(cmd.clientID, ErrUnsubscribed) - } - case shutdown: - state.removeAll(nil) - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.subscription) - case pub: - if err := state.send(cmd.msg, cmd.events); err != nil { - s.Logger.Error("Error querying for events", "err", err) - } - } - } -} - -func (state *state) add(clientID string, q Query, subscription *Subscription) { - qStr := q.String() - - // initialize subscription for this client per query if needed - if _, ok := state.subscriptions[qStr]; !ok { - state.subscriptions[qStr] = make(map[string]*Subscription) - } - - if _, ok := state.subscriptions[subscription.id]; !ok { - state.subscriptions[subscription.id] = make(map[string]*Subscription) - } - - // create subscription - state.subscriptions[qStr][clientID] = subscription - state.subscriptions[subscription.id][clientID] = subscription - - // initialize query if needed - if _, ok := state.queries[qStr]; !ok { - state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0} - } - // increment reference counter - state.queries[qStr].refCount++ -} - -func (state *state) remove(clientID string, qStr, id string, reason error) { - clientSubscriptions, ok := state.subscriptions[qStr] - if !ok { - return - } - - subscription, ok := clientSubscriptions[clientID] - if !ok { - return - } - - subscription.cancel(reason) - - // remove client from query map. - // if query has no other clients subscribed, remove it. 
- delete(state.subscriptions[qStr], clientID) - delete(state.subscriptions[id], clientID) - if len(state.subscriptions[qStr]) == 0 { - delete(state.subscriptions, qStr) - } - - // decrease ref counter in queries - if ref, ok := state.queries[qStr]; ok { - ref.refCount-- - if ref.refCount == 0 { - // remove the query if nobody else is using it - delete(state.queries, qStr) - } - } -} - -func (state *state) removeClient(clientID string, reason error) { - seen := map[string]struct{}{} - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[clientID]; ok { - if _, ok = seen[sub.id]; ok { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - state.remove(clientID, qStr, sub.id, reason) - seen[sub.id] = struct{}{} - } - } -} - -func (state *state) removeAll(reason error) { - for qStr, clientSubscriptions := range state.subscriptions { - sub, ok := clientSubscriptions[qStr] - if !ok || ok && sub.id == qStr { - // all subscriptions are double indexed by ID and query, only - // process them once. 
- continue - } - - for clientID := range clientSubscriptions { - state.remove(clientID, qStr, sub.id, reason) - } - } -} - -func (state *state) send(msg interface{}, events []types.Event) error { - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[qStr]; ok && sub.id == qStr { - continue - } - var q Query - if qi, ok := state.queries[qStr]; ok { - q = qi.q - } else { - continue - } - - match, err := q.Matches(events) - if err != nil { - return fmt.Errorf("failed to match against query %s: %w", q.String(), err) - } - - if match { - for clientID, subscription := range clientSubscriptions { - if cap(subscription.out) == 0 { - // block on unbuffered channel - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - case <-subscription.canceled: - } - } else { - // don't block on buffered channels - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - default: - state.remove(clientID, qStr, subscription.id, ErrOutOfCapacity) - } - } - } - } - } - - return nil -} diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go deleted file mode 100644 index 5254154930..0000000000 --- a/libs/pubsub/pubsub_test.go +++ /dev/null @@ -1,572 +0,0 @@ -package pubsub_test - -import ( - "context" - "fmt" - "runtime/debug" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -const ( - clientID = "test-client" -) - -func TestSubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, 
query.Empty{}) - require.NoError(t, err) - - require.Equal(t, 1, s.NumClients()) - require.Equal(t, 1, s.NumClientSubscriptions(clientID)) - - err = s.Publish(ctx, "Ka-Zar") - require.NoError(t, err) - assertReceive(t, "Ka-Zar", subscription.Out()) - - published := make(chan struct{}) - go func() { - defer close(published) - - err := s.Publish(ctx, "Quicksilver") - require.NoError(t, err) - - err = s.Publish(ctx, "Asylum") - require.NoError(t, err) - - err = s.Publish(ctx, "Ivan") - require.NoError(t, err) - }() - - select { - case <-published: - assertReceive(t, "Quicksilver", subscription.Out()) - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) - case <-time.After(3 * time.Second): - t.Fatal("Expected Publish(Asylum) not to block") - } -} - -func TestSubscribeWithCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.Empty{}, -1) - require.NoError(t, err) - }) - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.Empty{}, 0) - require.NoError(t, err) - }) - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) - require.NoError(t, err) - err = s.Publish(ctx, "Aggamon") - require.NoError(t, err) - assertReceive(t, "Aggamon", subscription.Out()) -} - -func TestSubscribeUnbuffered(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.Empty{}) - require.NoError(t, err) - - published := make(chan struct{}) - go func() { - defer close(published) - - err := s.Publish(ctx, "Ultron") - require.NoError(t, err) - - err = s.Publish(ctx, 
"Darkhawk") - require.NoError(t, err) - }() - - select { - case <-published: - t.Fatal("Expected Publish(Darkhawk) to block") - case <-time.After(3 * time.Second): - assertReceive(t, "Ultron", subscription.Out()) - assertReceive(t, "Darkhawk", subscription.Out()) - } -} - -func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Publish(ctx, "Fat Cobra") - require.NoError(t, err) - err = s.Publish(ctx, "Viper") - require.NoError(t, err) - - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) -} - -func TestDifferentClients(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - - subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - assertReceive(t, "Iceman", subscription1.Out()) - - subscription2, err := s.Subscribe( - ctx, - "client-2", - query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), - ) - require.NoError(t, err) - - events = []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - { - Type: "abci.account", - Attributes: []abci.EventAttribute{{Key: "name", Value: "Igor"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Ultimo", events)) - assertReceive(t, "Ultimo", subscription1.Out()) - assertReceive(t, 
"Ultimo", subscription2.Out()) - - subscription3, err := s.Subscribe( - ctx, - "client-3", - query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), - ) - require.NoError(t, err) - - events = []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Valeria Richards", events)) - require.Zero(t, len(subscription3.Out())) -} - -func TestSubscribeDuplicateKeys(t *testing.T) { - ctx := context.Background() - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - testCases := []struct { - query string - expected interface{} - }{ - { - "withdraw.rewards='17'", - "Iceman", - }, - { - "withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='1' AND withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='100'", - nil, - }, - } - - for i, tc := range testCases { - sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustParse(tc.query)) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "foo"}, - {Key: "sender", Value: "bar"}, - {Key: "sender", Value: "baz"}, - }, - }, - { - Type: "withdraw", - Attributes: []abci.EventAttribute{ - {Key: "rewards", Value: "1"}, - {Key: "rewards", Value: "17"}, - {Key: "rewards", Value: "22"}, - }, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - - if tc.expected != nil { - assertReceive(t, tc.expected, sub.Out()) - } else { - require.Zero(t, len(sub.Out())) - } - } -} - -func TestClientSubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := 
context.Background() - q := query.MustParse("tm.events.type='NewBlock'") - - subscription1, err := s.Subscribe(ctx, clientID, q) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Goblin Queen", events)) - assertReceive(t, "Goblin Queen", subscription1.Out()) - - subscription2, err := s.Subscribe(ctx, clientID, q) - require.Error(t, err) - require.Nil(t, subscription2) - - require.NoError(t, s.PublishWithEvents(ctx, "Spider-Man", events)) - assertReceive(t, "Spider-Man", subscription1.Out()) -} - -func TestUnsubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe") - - assertCanceled(t, subscription, pubsub.ErrUnsubscribed) -} - -func TestClientUnsubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.NoError(t, err) - - err = s.Unsubscribe(ctx, 
pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustParse("tm.events.type='NewBlock'")}) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) - err = s.UnsubscribeAll(ctx, clientID) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) -} - -func TestResubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{Subscriber: clientID, Query: query.Empty{}}) - require.NoError(t, err) - subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - - err = s.Publish(ctx, "Cable") - require.NoError(t, err) - assertReceive(t, "Cable", subscription.Out()) -} - -func TestUnsubscribeAll(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription1, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - subscription2, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'")) - require.NoError(t, err) - - err = s.UnsubscribeAll(ctx, clientID) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll") - require.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll") - - assertCanceled(t, subscription1, pubsub.ErrUnsubscribed) - assertCanceled(t, subscription2, pubsub.ErrUnsubscribed) -} - -func TestBufferCapacity(t *testing.T) { - s := pubsub.NewServer(pubsub.BufferCapacity(2)) - 
s.SetLogger(log.TestingLogger()) - - require.Equal(t, 2, s.BufferCapacity()) - - ctx := context.Background() - err := s.Publish(ctx, "Nighthawk") - require.NoError(t, err) - err = s.Publish(ctx, "Sage") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer cancel() - - err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { - require.Equal(t, context.DeadlineExceeded, err) - } -} - -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } - -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } - -func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - for i := 0; i < n; i++ { - subscription, err := s.Subscribe( - ctx, - clientID, - query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), - ) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: string(rune(i))}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) - } -} - -func benchmarkNClientsOneQuery(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, 
err) - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") - for i := 0; i < n; i++ { - subscription, err := s.Subscribe(ctx, clientID, q) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: "1"}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) - } -} - -// HELPERS - -func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { - select { - case actual := <-ch: - require.Equal(t, expected, actual.Data(), msgAndArgs...) 
- case <-time.After(1 * time.Second): - t.Errorf("expected to receive %v from the channel, got nothing after 1s", expected) - debug.PrintStack() - } -} - -func assertCanceled(t *testing.T, subscription *pubsub.Subscription, err error) { - _, ok := <-subscription.Canceled() - require.False(t, ok) - require.Equal(t, err, subscription.Err()) -} diff --git a/libs/pubsub/query/Makefile b/libs/pubsub/query/Makefile deleted file mode 100644 index aef42b2dff..0000000000 --- a/libs/pubsub/query/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -gen_query_parser: - go get -u -v github.com/pointlander/peg - peg -inline -switch query.peg - -fuzzy_test: - go get -u -v github.com/dvyukov/go-fuzz/go-fuzz - go get -u -v github.com/dvyukov/go-fuzz/go-fuzz-build - go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test - go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output - -.PHONY: gen_query_parser fuzzy_test diff --git a/libs/pubsub/query/empty.go b/libs/pubsub/query/empty.go deleted file mode 100644 index dd6b3f3b20..0000000000 --- a/libs/pubsub/query/empty.go +++ /dev/null @@ -1,18 +0,0 @@ -package query - -import ( - "github.com/tendermint/tendermint/abci/types" -) - -// Empty query matches any set of events. -type Empty struct { -} - -// Matches always returns true. 
-func (Empty) Matches(events []types.Event) (bool, error) { - return true, nil -} - -func (Empty) String() string { - return "empty" -} diff --git a/libs/pubsub/query/empty_test.go b/libs/pubsub/query/empty_test.go deleted file mode 100644 index 4bb3067d63..0000000000 --- a/libs/pubsub/query/empty_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestEmptyQueryMatchesAnything(t *testing.T) { - q := query.Empty{} - - testCases := []struct { - events []abci.Event - }{ - { - []abci.Event{}, - }, - { - []abci.Event{ - { - Type: "Asher", - Attributes: []abci.EventAttribute{{Key: "Roth"}}, - }, - }, - }, - { - []abci.Event{ - { - Type: "Route", - Attributes: []abci.EventAttribute{{Key: "66"}}, - }, - }, - }, - { - []abci.Event{ - { - Type: "Route", - Attributes: []abci.EventAttribute{{Key: "66"}}, - }, - { - Type: "Billy", - Attributes: []abci.EventAttribute{{Key: "Blue"}}, - }, - }, - }, - } - - for _, tc := range testCases { - match, err := q.Matches(tc.events) - require.Nil(t, err) - require.True(t, match) - } -} diff --git a/libs/pubsub/query/fuzz_test/main.go b/libs/pubsub/query/fuzz_test/main.go deleted file mode 100644 index 7a46116b50..0000000000 --- a/libs/pubsub/query/fuzz_test/main.go +++ /dev/null @@ -1,30 +0,0 @@ -package fuzz_test - -import ( - "fmt" - - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func Fuzz(data []byte) int { - sdata := string(data) - q0, err := query.New(sdata) - if err != nil { - return 0 - } - - sdata1 := q0.String() - q1, err := query.New(sdata1) - if err != nil { - panic(err) - } - - sdata2 := q1.String() - if sdata1 != sdata2 { - fmt.Printf("q0: %q\n", sdata1) - fmt.Printf("q1: %q\n", sdata2) - panic("query changed") - } - - return 1 -} diff --git a/libs/pubsub/query/parser_test.go b/libs/pubsub/query/parser_test.go deleted file 
mode 100644 index a08a0d16d0..0000000000 --- a/libs/pubsub/query/parser_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -// TODO: fuzzy testing? -func TestParser(t *testing.T) { - cases := []struct { - query string - valid bool - }{ - {"tm.events.type='NewBlock'", true}, - {"tm.events.type = 'NewBlock'", true}, - {"tm.events.name = ''", true}, - {"tm.events.type='TIME'", true}, - {"tm.events.type='DATE'", true}, - {"tm.events.type='='", true}, - {"tm.events.type='TIME", false}, - {"tm.events.type=TIME'", false}, - {"tm.events.type==", false}, - {"tm.events.type=NewBlock", false}, - {">==", false}, - {"tm.events.type 'NewBlock' =", false}, - {"tm.events.type>'NewBlock'", false}, - {"", false}, - {"=", false}, - {"='NewBlock'", false}, - {"tm.events.type=", false}, - - {"tm.events.typeNewBlock", false}, - {"tm.events.type'NewBlock'", false}, - {"'NewBlock'", false}, - {"NewBlock", false}, - {"", false}, - - {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, - {"tm.events.type='NewBlock' AND", false}, - {"tm.events.type='NewBlock' AN", false}, - {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, - {"AND tm.events.type='NewBlock' ", false}, - - {"abci.account.name CONTAINS 'Igor'", true}, - - {"tx.date > DATE 2013-05-03", true}, - {"tx.date < DATE 2013-05-03", true}, - {"tx.date <= DATE 2013-05-03", true}, - {"tx.date >= DATE 2013-05-03", true}, - {"tx.date >= DAT 2013-05-03", false}, - {"tx.date <= DATE2013-05-03", false}, - {"tx.date <= DATE -05-03", false}, - {"tx.date >= DATE 20130503", false}, - {"tx.date >= DATE 2013+01-03", false}, - // incorrect year, month, day - {"tx.date >= DATE 0013-01-03", false}, - {"tx.date >= DATE 2013-31-03", false}, - {"tx.date >= DATE 2013-01-83", false}, - - {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, - {"tx.date < TIME 2013-05-03T14:45:00-02:00", 
true}, - {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, - {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, - {"tx.date >= TIME2013-05-03T14:45:00Z", false}, - {"tx.date = IME 2013-05-03T14:45:00Z", false}, - {"tx.date = TIME 2013-05-:45:00Z", false}, - {"tx.date >= TIME 2013-05-03T14:45:00", false}, - {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, - {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, - - {"account.balance=100", true}, - {"account.balance >= 200", true}, - {"account.balance >= -300", false}, - {"account.balance >>= 400", false}, - {"account.balance=33.22.1", false}, - - {"slashing.amount EXISTS", true}, - {"slashing.amount EXISTS AND account.balance=100", true}, - {"account.balance=100 AND slashing.amount EXISTS", true}, - {"slashing EXISTS", true}, - - {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, - {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, - } - - for _, c := range cases { - _, err := query.New(c.query) - if c.valid { - assert.NoErrorf(t, err, "Query was '%s'", c.query) - } else { - assert.Errorf(t, err, "Query was '%s'", c.query) - } - } -} diff --git a/libs/pubsub/query/peg.go b/libs/pubsub/query/peg.go deleted file mode 100644 index 816589f02f..0000000000 --- a/libs/pubsub/query/peg.go +++ /dev/null @@ -1,3 +0,0 @@ -package query - -//go:generate peg -inline -switch query.peg diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go deleted file mode 100644 index 7b1dfe0f9d..0000000000 --- a/libs/pubsub/query/query.go +++ /dev/null @@ -1,527 +0,0 @@ -// Package query provides a parser for a custom query format: -// -// abci.invoice.number=22 AND abci.invoice.owner=Ivan -// -// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. -// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics -// -// It has a support for numbers (integer and floating point), dates and times. 
-package query - -import ( - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/tendermint/tendermint/abci/types" -) - -var ( - numRegex = regexp.MustCompile(`([0-9\.]+)`) -) - -// Query holds the query string and the query parser. -type Query struct { - str string - parser *QueryParser -} - -// Condition represents a single condition within a query and consists of composite key -// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7"). -type Condition struct { - CompositeKey string - Op Operator - Operand interface{} -} - -// New parses the given string and returns a query or error if the string is -// invalid. -func New(s string) (*Query, error) { - p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)} - p.Init() - if err := p.Parse(); err != nil { - return nil, err - } - return &Query{str: s, parser: p}, nil -} - -// MustParse turns the given string into a query or panics; for tests or others -// cases where you know the string is valid. -func MustParse(s string) *Query { - q, err := New(s) - if err != nil { - panic(fmt.Sprintf("failed to parse %s: %v", s, err)) - } - return q -} - -// String returns the original string. -func (q *Query) String() string { - return q.str -} - -// Operator is an operator that defines some kind of relation between composite key and -// operand (equality, etc.). -type Operator uint8 - -const ( - // "<=" - OpLessEqual Operator = iota - // ">=" - OpGreaterEqual - // "<" - OpLess - // ">" - OpGreater - // "=" - OpEqual - // "CONTAINS"; used to check if a string contains a certain sub string. - OpContains - // "EXISTS"; used to check if a certain event attribute is present. - OpExists -) - -const ( - // DateLayout defines a layout for all dates (`DATE date`) - DateLayout = "2006-01-02" - // TimeLayout defines a layout for all times (`TIME time`) - TimeLayout = time.RFC3339 -) - -// Conditions returns a list of conditions. It returns an error if there is any -// error with the provided grammar in the Query. 
-func (q *Query) Conditions() ([]Condition, error) { - var ( - eventAttr string - op Operator - ) - - conditions := make([]Condition, 0) - buffer, begin, end := q.parser.Buffer, 0, 0 - - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") - for token := range q.parser.Tokens() { - switch token.pegRule { - case rulePegText: - begin, end = int(token.begin), int(token.end) - - case ruletag: - eventAttr = buffer[begin:end] - - case rulele: - op = OpLessEqual - - case rulege: - op = OpGreaterEqual - - case rulel: - op = OpLess - - case ruleg: - op = OpGreater - - case ruleequal: - op = OpEqual - - case rulecontains: - op = OpContains - - case ruleexists: - op = OpExists - conditions = append(conditions, Condition{eventAttr, op, nil}) - - case rulevalue: - // strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock") - valueWithoutSingleQuotes := buffer[begin+1 : end-1] - conditions = append(conditions, Condition{eventAttr, op, valueWithoutSingleQuotes}) - - case rulenumber: - number := buffer[begin:end] - if strings.ContainsAny(number, ".") { // if it looks like a floating-point number - value, err := strconv.ParseFloat(number, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", - err, number, - ) - return nil, err - } - - conditions = append(conditions, Condition{eventAttr, op, value}) - } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, - ) - return nil, err - } - - conditions = append(conditions, Condition{eventAttr, op, value}) - } - - case ruletime: - value, err := time.Parse(TimeLayout, buffer[begin:end]) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", - err, buffer[begin:end], - ) - 
return nil, err - } - - conditions = append(conditions, Condition{eventAttr, op, value}) - - case ruledate: - value, err := time.Parse("2006-01-02", buffer[begin:end]) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", - err, buffer[begin:end], - ) - return nil, err - } - - conditions = append(conditions, Condition{eventAttr, op, value}) - } - } - - return conditions, nil -} - -// Matches returns true if the query matches against any event in the given set -// of events, false otherwise. For each event, a match exists if the query is -// matched against *any* value in a slice of values. An error is returned if -// any attempted event match returns an error. -// -// For example, query "name=John" matches events = {"name": ["John", "Eric"]}. -// More examples could be found in parser_test.go and query_test.go. -func (q *Query) Matches(rawEvents []types.Event) (bool, error) { - if len(rawEvents) == 0 { - return false, nil - } - - events := flattenEvents(rawEvents) - - var ( - eventAttr string - op Operator - ) - - buffer, begin, end := q.parser.Buffer, 0, 0 - - // tokens must be in the following order: - - // tag ("tx.gas") -> operator ("=") -> operand ("7") - for token := range q.parser.Tokens() { - switch token.pegRule { - case rulePegText: - begin, end = int(token.begin), int(token.end) - - case ruletag: - eventAttr = buffer[begin:end] - - case rulele: - op = OpLessEqual - - case rulege: - op = OpGreaterEqual - - case rulel: - op = OpLess - - case ruleg: - op = OpGreater - - case ruleequal: - op = OpEqual - - case rulecontains: - op = OpContains - case ruleexists: - op = OpExists - if strings.Contains(eventAttr, ".") { - // Searching for a full "type.attribute" event. 
- _, ok := events[eventAttr] - if !ok { - return false, nil - } - } else { - foundEvent := false - - loop: - for compositeKey := range events { - if strings.Index(compositeKey, eventAttr) == 0 { - foundEvent = true - break loop - } - } - if !foundEvent { - return false, nil - } - } - - case rulevalue: - // strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock") - valueWithoutSingleQuotes := buffer[begin+1 : end-1] - - // see if the triplet (event attribute, operator, operand) matches any event - // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } - match, err := match(eventAttr, op, reflect.ValueOf(valueWithoutSingleQuotes), events) - if err != nil { - return false, err - } - - if !match { - return false, nil - } - - case rulenumber: - number := buffer[begin:end] - if strings.ContainsAny(number, ".") { // if it looks like a floating-point number - value, err := strconv.ParseFloat(number, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", - err, number, - ) - return false, err - } - - match, err := match(eventAttr, op, reflect.ValueOf(value), events) - if err != nil { - return false, err - } - - if !match { - return false, nil - } - } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, - ) - return false, err - } - - match, err := match(eventAttr, op, reflect.ValueOf(value), events) - if err != nil { - return false, err - } - - if !match { - return false, nil - } - } - - case ruletime: - value, err := time.Parse(TimeLayout, buffer[begin:end]) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", - err, buffer[begin:end], - ) - return false, err - } - - match, err := match(eventAttr, op, reflect.ValueOf(value), events) - 
if err != nil { - return false, err - } - - if !match { - return false, nil - } - - case ruledate: - value, err := time.Parse("2006-01-02", buffer[begin:end]) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", - err, buffer[begin:end], - ) - return false, err - } - - match, err := match(eventAttr, op, reflect.ValueOf(value), events) - if err != nil { - return false, err - } - - if !match { - return false, nil - } - } - } - - return true, nil -} - -// match returns true if the given triplet (attribute, operator, operand) matches -// any value in an event for that attribute. If any match fails with an error, -// that error is returned. -// -// First, it looks up the key in the events and if it finds one, tries to compare -// all the values from it to the operand using the operator. -// -// "tx.gas", "=", "7", {"tx": [{"gas": 7, "ID": "4AE393495334"}]} -func match(attr string, op Operator, operand reflect.Value, events map[string][]string) (bool, error) { - // look up the tag from the query in tags - values, ok := events[attr] - if !ok { - return false, nil - } - - for _, value := range values { - // return true if any value in the set of the event's values matches - match, err := matchValue(value, op, operand) - if err != nil { - return false, err - } - - if match { - return true, nil - } - } - - return false, nil -} - -// matchValue will attempt to match a string value against an operator an -// operand. A boolean is returned representing the match result. It will return -// an error if the value cannot be parsed and matched against the operand type. 
-func matchValue(value string, op Operator, operand reflect.Value) (bool, error) { - switch operand.Kind() { - case reflect.Struct: // time - operandAsTime := operand.Interface().(time.Time) - - // try our best to convert value from events to time.Time - var ( - v time.Time - err error - ) - - if strings.ContainsAny(value, "T") { - v, err = time.Parse(TimeLayout, value) - } else { - v, err = time.Parse(DateLayout, value) - } - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to time.Time: %w", value, err) - } - - switch op { - case OpLessEqual: - return (v.Before(operandAsTime) || v.Equal(operandAsTime)), nil - case OpGreaterEqual: - return (v.Equal(operandAsTime) || v.After(operandAsTime)), nil - case OpLess: - return v.Before(operandAsTime), nil - case OpGreater: - return v.After(operandAsTime), nil - case OpEqual: - return v.Equal(operandAsTime), nil - } - - case reflect.Float64: - var v float64 - - operandFloat64 := operand.Interface().(float64) - filteredValue := numRegex.FindString(value) - - // try our best to convert value from tags to float64 - v, err := strconv.ParseFloat(filteredValue, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) - } - - switch op { - case OpLessEqual: - return v <= operandFloat64, nil - case OpGreaterEqual: - return v >= operandFloat64, nil - case OpLess: - return v < operandFloat64, nil - case OpGreater: - return v > operandFloat64, nil - case OpEqual: - return v == operandFloat64, nil - } - - case reflect.Int64: - var v int64 - - operandInt := operand.Interface().(int64) - filteredValue := numRegex.FindString(value) - - // if value looks like float, we try to parse it as float - if strings.ContainsAny(filteredValue, ".") { - v1, err := strconv.ParseFloat(filteredValue, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) - 
} - - v = int64(v1) - } else { - var err error - // try our best to convert value from tags to int64 - v, err = strconv.ParseInt(filteredValue, 10, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to int64: %w", filteredValue, err) - } - } - - switch op { - case OpLessEqual: - return v <= operandInt, nil - case OpGreaterEqual: - return v >= operandInt, nil - case OpLess: - return v < operandInt, nil - case OpGreater: - return v > operandInt, nil - case OpEqual: - return v == operandInt, nil - } - - case reflect.String: - switch op { - case OpEqual: - return value == operand.String(), nil - case OpContains: - return strings.Contains(value, operand.String()), nil - } - - default: - return false, fmt.Errorf("unknown kind of operand %v", operand.Kind()) - } - - return false, nil -} - -func flattenEvents(events []types.Event) map[string][]string { - flattened := make(map[string][]string) - - for _, event := range events { - if len(event.Type) == 0 { - continue - } - - for _, attr := range event.Attributes { - if len(attr.Key) == 0 { - continue - } - - compositeEvent := fmt.Sprintf("%s.%s", event.Type, attr.Key) - flattened[compositeEvent] = append(flattened[compositeEvent], attr.Value) - } - } - - return flattened -} diff --git a/libs/pubsub/query/query.peg b/libs/pubsub/query/query.peg deleted file mode 100644 index e2cfd08262..0000000000 --- a/libs/pubsub/query/query.peg +++ /dev/null @@ -1,35 +0,0 @@ -package query - -type QueryParser Peg { -} - -e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !. - -condition <- tag ' '* (le ' '* (number / time / date) - / ge ' '* (number / time / date) - / l ' '* (number / time / date) - / g ' '* (number / time / date) - / equal ' '* (number / time / date / value) - / contains ' '* value - / exists - ) - -tag <- < (![ \t\n\r\\()"'=><] .)+ > -value <- < '\'' (!["'] .)* '\''> -number <- < ('0' - / [1-9] digit* ('.' digit*)?) 
> -digit <- [0-9] -time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') > -date <- "DATE " < year '-' month '-' day > -year <- ('1' / '2') digit digit digit -month <- ('0' / '1') digit -day <- ('0' / '1' / '2' / '3') digit -and <- "AND" - -equal <- "=" -contains <- "CONTAINS" -exists <- "EXISTS" -le <- "<=" -ge <- ">=" -l <- "<" -g <- ">" diff --git a/libs/pubsub/query/query.peg.go b/libs/pubsub/query/query.peg.go deleted file mode 100644 index a8e14c8698..0000000000 --- a/libs/pubsub/query/query.peg.go +++ /dev/null @@ -1,1871 +0,0 @@ -// nolint -package query - -import ( - "fmt" - "math" - "sort" - "strconv" -) - -const endSymbol rune = 1114112 - -/* The rule types inferred from the grammar are below. */ -type pegRule uint8 - -const ( - ruleUnknown pegRule = iota - rulee - rulecondition - ruletag - rulevalue - rulenumber - ruledigit - ruletime - ruledate - ruleyear - rulemonth - ruleday - ruleand - ruleequal - rulecontains - ruleexists - rulele - rulege - rulel - ruleg - rulePegText - - rulePre - ruleIn - ruleSuf -) - -var rul3s = [...]string{ - "Unknown", - "e", - "condition", - "tag", - "value", - "number", - "digit", - "time", - "date", - "year", - "month", - "day", - "and", - "equal", - "contains", - "exists", - "le", - "ge", - "l", - "g", - "PegText", - - "Pre_", - "_In_", - "_Suf", -} - -type node32 struct { - token32 - up, next *node32 -} - -func (node *node32) print(depth int, buffer string) { - for node != nil { - for c := 0; c < depth; c++ { - fmt.Printf(" ") - } - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rul3s[node.pegRule], strconv.Quote(string(([]rune(buffer)[node.begin:node.end])))) - if node.up != nil { - node.up.print(depth+1, buffer) - } - node = node.next - } -} - -func (node *node32) Print(buffer string) { - node.print(0, buffer) -} - -type element struct { - node *node32 - down *element -} - -/* ${@} bit structure for abstract syntax tree */ -type token32 struct { - 
pegRule - begin, end, next uint32 -} - -func (t *token32) isZero() bool { - return t.pegRule == ruleUnknown && t.begin == 0 && t.end == 0 && t.next == 0 -} - -func (t *token32) isParentOf(u token32) bool { - return t.begin <= u.begin && t.end >= u.end && t.next > u.next -} - -func (t *token32) getToken32() token32 { - return token32{pegRule: t.pegRule, begin: uint32(t.begin), end: uint32(t.end), next: uint32(t.next)} -} - -func (t *token32) String() string { - return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v %v", rul3s[t.pegRule], t.begin, t.end, t.next) -} - -type tokens32 struct { - tree []token32 - ordered [][]token32 -} - -func (t *tokens32) trim(length int) { - t.tree = t.tree[0:length] -} - -func (t *tokens32) Print() { - for _, token := range t.tree { - fmt.Println(token.String()) - } -} - -func (t *tokens32) Order() [][]token32 { - if t.ordered != nil { - return t.ordered - } - - depths := make([]int32, 1, math.MaxInt16) - for i, token := range t.tree { - if token.pegRule == ruleUnknown { - t.tree = t.tree[:i] - break - } - depth := int(token.next) - if length := len(depths); depth >= length { - depths = depths[:depth+1] - } - depths[depth]++ - } - depths = append(depths, 0) - - ordered, pool := make([][]token32, len(depths)), make([]token32, len(t.tree)+len(depths)) - for i, depth := range depths { - depth++ - ordered[i], pool, depths[i] = pool[:depth], pool[depth:], 0 - } - - for i, token := range t.tree { - depth := token.next - token.next = uint32(i) - ordered[depth][depths[depth]] = token - depths[depth]++ - } - t.ordered = ordered - return ordered -} - -type state32 struct { - token32 - depths []int32 - leaf bool -} - -func (t *tokens32) AST() *node32 { - tokens := t.Tokens() - stack := &element{node: &node32{token32: <-tokens}} - for token := range tokens { - if token.begin == token.end { - continue - } - node := &node32{token32: token} - for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { - stack.node.next = node.up - 
node.up = stack.node - stack = stack.down - } - stack = &element{node: node, down: stack} - } - return stack.node -} - -func (t *tokens32) PreOrder() (<-chan state32, [][]token32) { - s, ordered := make(chan state32, 6), t.Order() - go func() { - var states [8]state32 - for i := range states { - states[i].depths = make([]int32, len(ordered)) - } - depths, state, depth := make([]int32, len(ordered)), 0, 1 - write := func(t token32, leaf bool) { - S := states[state] - state, S.pegRule, S.begin, S.end, S.next, S.leaf = (state+1)%8, t.pegRule, t.begin, t.end, uint32(depth), leaf - copy(S.depths, depths) - s <- S - } - - states[state].token32 = ordered[0][0] - depths[0]++ - state++ - a, b := ordered[depth-1][depths[depth-1]-1], ordered[depth][depths[depth]] - depthFirstSearch: - for { - for { - if i := depths[depth]; i > 0 { - if c, j := ordered[depth][i-1], depths[depth-1]; a.isParentOf(c) && - (j < 2 || !ordered[depth-1][j-2].isParentOf(c)) { - if c.end != b.begin { - write(token32{pegRule: ruleIn, begin: c.end, end: b.begin}, true) - } - break - } - } - - if a.begin < b.begin { - write(token32{pegRule: rulePre, begin: a.begin, end: b.begin}, true) - } - break - } - - next := depth + 1 - if c := ordered[next][depths[next]]; c.pegRule != ruleUnknown && b.isParentOf(c) { - write(b, false) - depths[depth]++ - depth, a, b = next, b, c - continue - } - - write(b, true) - depths[depth]++ - c, parent := ordered[depth][depths[depth]], true - for { - if c.pegRule != ruleUnknown && a.isParentOf(c) { - b = c - continue depthFirstSearch - } else if parent && b.end != a.end { - write(token32{pegRule: ruleSuf, begin: b.end, end: a.end}, true) - } - - depth-- - if depth > 0 { - a, b, c = ordered[depth-1][depths[depth-1]-1], a, ordered[depth][depths[depth]] - parent = a.isParentOf(b) - continue - } - - break depthFirstSearch - } - } - - close(s) - }() - return s, ordered -} - -func (t *tokens32) PrintSyntax() { - tokens, ordered := t.PreOrder() - max := -1 - for token := range tokens 
{ - if !token.leaf { - fmt.Printf("%v", token.begin) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[36m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[36m%v\x1B[m\n", rul3s[token.pegRule]) - } else if token.begin == token.end { - fmt.Printf("%v", token.begin) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[31m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[31m%v\x1B[m\n", rul3s[token.pegRule]) - } else { - for c, end := token.begin, token.end; c < end; c++ { - if i := int(c); max+1 < i { - for j := max; j < i; j++ { - fmt.Printf("skip %v %v\n", j, token.String()) - } - max = i - } else if i := int(c); i <= max { - for j := i; j <= max; j++ { - fmt.Printf("dupe %v %v\n", j, token.String()) - } - } else { - max = int(c) - } - fmt.Printf("%v", c) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[34m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[34m%v\x1B[m\n", rul3s[token.pegRule]) - } - fmt.Printf("\n") - } - } -} - -func (t *tokens32) PrintSyntaxTree(buffer string) { - tokens, _ := t.PreOrder() - for token := range tokens { - for c := 0; c < int(token.next); c++ { - fmt.Printf(" ") - } - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rul3s[token.pegRule], strconv.Quote(string(([]rune(buffer)[token.begin:token.end])))) - } -} - -func (t *tokens32) Add(rule pegRule, begin, end, depth uint32, index int) { - t.tree[index] = token32{pegRule: rule, begin: uint32(begin), end: uint32(end), next: uint32(depth)} -} - -func (t *tokens32) Tokens() <-chan token32 { - s := make(chan token32, 16) - go func() { - for _, v := range t.tree { - s <- v.getToken32() - } - close(s) - }() - return s -} - -func (t *tokens32) Error() []token32 { - ordered := t.Order() - length := len(ordered) - tokens, length := make([]token32, length), length-1 - for i := range tokens { - o := 
ordered[length-i] - if len(o) > 1 { - tokens[i] = o[len(o)-2].getToken32() - } - } - return tokens -} - -func (t *tokens32) Expand(index int) { - tree := t.tree - if index >= len(tree) { - expanded := make([]token32, 2*len(tree)) - copy(expanded, tree) - t.tree = expanded - } -} - -type QueryParser struct { - Buffer string - buffer []rune - rules [21]func() bool - Parse func(rule ...int) error - Reset func() - Pretty bool - tokens32 -} - -type textPosition struct { - line, symbol int -} - -type textPositionMap map[int]textPosition - -func translatePositions(buffer []rune, positions []int) textPositionMap { - length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 - sort.Ints(positions) - -search: - for i, c := range buffer { - if c == '\n' { - line, symbol = line+1, 0 - } else { - symbol++ - } - if i == positions[j] { - translations[positions[j]] = textPosition{line, symbol} - for j++; j < length; j++ { - if i != positions[j] { - continue search - } - } - break search - } - } - - return translations -} - -type parseError struct { - p *QueryParser - max token32 -} - -func (e *parseError) Error() string { - tokens, error := []token32{e.max}, "\n" - positions, p := make([]int, 2*len(tokens)), 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - translations := translatePositions(e.p.buffer, positions) - format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" - if e.p.Pretty { - format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" - } - for _, token := range tokens { - begin, end := int(token.begin), int(token.end) - error += fmt.Sprintf(format, - rul3s[token.pegRule], - translations[begin].line, translations[begin].symbol, - translations[end].line, translations[end].symbol, - strconv.Quote(string(e.p.buffer[begin:end]))) - } - - return error -} - -func (p *QueryParser) PrintSyntaxTree() { - 
p.tokens32.PrintSyntaxTree(p.Buffer) -} - -func (p *QueryParser) Highlighter() { - p.PrintSyntax() -} - -func (p *QueryParser) Init() { - p.buffer = []rune(p.Buffer) - if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != endSymbol { - p.buffer = append(p.buffer, endSymbol) - } - - tree := tokens32{tree: make([]token32, math.MaxInt16)} - var max token32 - position, depth, tokenIndex, buffer, _rules := uint32(0), uint32(0), 0, p.buffer, p.rules - - p.Parse = func(rule ...int) error { - r := 1 - if len(rule) > 0 { - r = rule[0] - } - matches := p.rules[r]() - p.tokens32 = tree - if matches { - p.trim(tokenIndex) - return nil - } - return &parseError{p, max} - } - - p.Reset = func() { - position, tokenIndex, depth = 0, 0, 0 - } - - add := func(rule pegRule, begin uint32) { - tree.Expand(tokenIndex) - tree.Add(rule, begin, position, depth, tokenIndex) - tokenIndex++ - if begin != position && position > max.end { - max = token32{rule, begin, position, depth} - } - } - - matchDot := func() bool { - if buffer[position] != endSymbol { - position++ - return true - } - return false - } - - /*matchChar := func(c byte) bool { - if buffer[position] == c { - position++ - return true - } - return false - }*/ - - /*matchRange := func(lower byte, upper byte) bool { - if c := buffer[position]; c >= lower && c <= upper { - position++ - return true - } - return false - }*/ - - _rules = [...]func() bool{ - nil, - /* 0 e <- <('"' condition (' '+ and ' '+ condition)* '"' !.)> */ - func() bool { - position0, tokenIndex0, depth0 := position, tokenIndex, depth - { - position1 := position - depth++ - if buffer[position] != rune('"') { - goto l0 - } - position++ - if !_rules[rulecondition]() { - goto l0 - } - l2: - { - position3, tokenIndex3, depth3 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l3 - } - position++ - l4: - { - position5, tokenIndex5, depth5 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l5 - } - position++ - goto l4 - 
l5: - position, tokenIndex, depth = position5, tokenIndex5, depth5 - } - { - position6 := position - depth++ - { - position7, tokenIndex7, depth7 := position, tokenIndex, depth - if buffer[position] != rune('a') { - goto l8 - } - position++ - goto l7 - l8: - position, tokenIndex, depth = position7, tokenIndex7, depth7 - if buffer[position] != rune('A') { - goto l3 - } - position++ - } - l7: - { - position9, tokenIndex9, depth9 := position, tokenIndex, depth - if buffer[position] != rune('n') { - goto l10 - } - position++ - goto l9 - l10: - position, tokenIndex, depth = position9, tokenIndex9, depth9 - if buffer[position] != rune('N') { - goto l3 - } - position++ - } - l9: - { - position11, tokenIndex11, depth11 := position, tokenIndex, depth - if buffer[position] != rune('d') { - goto l12 - } - position++ - goto l11 - l12: - position, tokenIndex, depth = position11, tokenIndex11, depth11 - if buffer[position] != rune('D') { - goto l3 - } - position++ - } - l11: - depth-- - add(ruleand, position6) - } - if buffer[position] != rune(' ') { - goto l3 - } - position++ - l13: - { - position14, tokenIndex14, depth14 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l14 - } - position++ - goto l13 - l14: - position, tokenIndex, depth = position14, tokenIndex14, depth14 - } - if !_rules[rulecondition]() { - goto l3 - } - goto l2 - l3: - position, tokenIndex, depth = position3, tokenIndex3, depth3 - } - if buffer[position] != rune('"') { - goto l0 - } - position++ - { - position15, tokenIndex15, depth15 := position, tokenIndex, depth - if !matchDot() { - goto l15 - } - goto l0 - l15: - position, tokenIndex, depth = position15, tokenIndex15, depth15 - } - depth-- - add(rulee, position1) - } - return true - l0: - position, tokenIndex, depth = position0, tokenIndex0, depth0 - return false - }, - /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / 
(ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('E' | 'e') exists) | (&('=') (equal ' '* ((&('\'') value) | (&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ - func() bool { - position16, tokenIndex16, depth16 := position, tokenIndex, depth - { - position17 := position - depth++ - { - position18 := position - depth++ - { - position19 := position - depth++ - { - position22, tokenIndex22, depth22 := position, tokenIndex, depth - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l22 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l22 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l22 - } - position++ - break - case '\'': - if buffer[position] != rune('\'') { - goto l22 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l22 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l22 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l22 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l22 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l22 - } - position++ - break - case '\n': - if buffer[position] != rune('\n') { - goto l22 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l22 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l22 - } - position++ - break - } - } - - goto l16 - l22: - position, 
tokenIndex, depth = position22, tokenIndex22, depth22 - } - if !matchDot() { - goto l16 - } - l20: - { - position21, tokenIndex21, depth21 := position, tokenIndex, depth - { - position24, tokenIndex24, depth24 := position, tokenIndex, depth - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l24 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l24 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l24 - } - position++ - break - case '\'': - if buffer[position] != rune('\'') { - goto l24 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l24 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l24 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l24 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l24 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l24 - } - position++ - break - case '\n': - if buffer[position] != rune('\n') { - goto l24 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l24 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l24 - } - position++ - break - } - } - - goto l21 - l24: - position, tokenIndex, depth = position24, tokenIndex24, depth24 - } - if !matchDot() { - goto l21 - } - goto l20 - l21: - position, tokenIndex, depth = position21, tokenIndex21, depth21 - } - depth-- - add(rulePegText, position19) - } - depth-- - add(ruletag, position18) - } - l26: - { - position27, tokenIndex27, depth27 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l27 - } - position++ - goto l26 - l27: - position, tokenIndex, depth = position27, tokenIndex27, depth27 - } - { - position28, tokenIndex28, depth28 := position, tokenIndex, depth - { - position30 := position - depth++ - if buffer[position] != rune('<') { - goto l29 - 
} - position++ - if buffer[position] != rune('=') { - goto l29 - } - position++ - depth-- - add(rulele, position30) - } - l31: - { - position32, tokenIndex32, depth32 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l32 - } - position++ - goto l31 - l32: - position, tokenIndex, depth = position32, tokenIndex32, depth32 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l29 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l29 - } - break - default: - if !_rules[rulenumber]() { - goto l29 - } - break - } - } - - goto l28 - l29: - position, tokenIndex, depth = position28, tokenIndex28, depth28 - { - position35 := position - depth++ - if buffer[position] != rune('>') { - goto l34 - } - position++ - if buffer[position] != rune('=') { - goto l34 - } - position++ - depth-- - add(rulege, position35) - } - l36: - { - position37, tokenIndex37, depth37 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l37 - } - position++ - goto l36 - l37: - position, tokenIndex, depth = position37, tokenIndex37, depth37 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l34 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l34 - } - break - default: - if !_rules[rulenumber]() { - goto l34 - } - break - } - } - - goto l28 - l34: - position, tokenIndex, depth = position28, tokenIndex28, depth28 - { - switch buffer[position] { - case 'E', 'e': - { - position40 := position - depth++ - { - position41, tokenIndex41, depth41 := position, tokenIndex, depth - if buffer[position] != rune('e') { - goto l42 - } - position++ - goto l41 - l42: - position, tokenIndex, depth = position41, tokenIndex41, depth41 - if buffer[position] != rune('E') { - goto l16 - } - position++ - } - l41: - { - position43, tokenIndex43, depth43 := position, tokenIndex, depth - if buffer[position] != rune('x') { - goto l44 - } - position++ - goto l43 - l44: - position, tokenIndex, 
depth = position43, tokenIndex43, depth43 - if buffer[position] != rune('X') { - goto l16 - } - position++ - } - l43: - { - position45, tokenIndex45, depth45 := position, tokenIndex, depth - if buffer[position] != rune('i') { - goto l46 - } - position++ - goto l45 - l46: - position, tokenIndex, depth = position45, tokenIndex45, depth45 - if buffer[position] != rune('I') { - goto l16 - } - position++ - } - l45: - { - position47, tokenIndex47, depth47 := position, tokenIndex, depth - if buffer[position] != rune('s') { - goto l48 - } - position++ - goto l47 - l48: - position, tokenIndex, depth = position47, tokenIndex47, depth47 - if buffer[position] != rune('S') { - goto l16 - } - position++ - } - l47: - { - position49, tokenIndex49, depth49 := position, tokenIndex, depth - if buffer[position] != rune('t') { - goto l50 - } - position++ - goto l49 - l50: - position, tokenIndex, depth = position49, tokenIndex49, depth49 - if buffer[position] != rune('T') { - goto l16 - } - position++ - } - l49: - { - position51, tokenIndex51, depth51 := position, tokenIndex, depth - if buffer[position] != rune('s') { - goto l52 - } - position++ - goto l51 - l52: - position, tokenIndex, depth = position51, tokenIndex51, depth51 - if buffer[position] != rune('S') { - goto l16 - } - position++ - } - l51: - depth-- - add(ruleexists, position40) - } - break - case '=': - { - position53 := position - depth++ - if buffer[position] != rune('=') { - goto l16 - } - position++ - depth-- - add(ruleequal, position53) - } - l54: - { - position55, tokenIndex55, depth55 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l55 - } - position++ - goto l54 - l55: - position, tokenIndex, depth = position55, tokenIndex55, depth55 - } - { - switch buffer[position] { - case '\'': - if !_rules[rulevalue]() { - goto l16 - } - break - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if 
!_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - case '>': - { - position57 := position - depth++ - if buffer[position] != rune('>') { - goto l16 - } - position++ - depth-- - add(ruleg, position57) - } - l58: - { - position59, tokenIndex59, depth59 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l59 - } - position++ - goto l58 - l59: - position, tokenIndex, depth = position59, tokenIndex59, depth59 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - case '<': - { - position61 := position - depth++ - if buffer[position] != rune('<') { - goto l16 - } - position++ - depth-- - add(rulel, position61) - } - l62: - { - position63, tokenIndex63, depth63 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l63 - } - position++ - goto l62 - l63: - position, tokenIndex, depth = position63, tokenIndex63, depth63 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - default: - { - position65 := position - depth++ - { - position66, tokenIndex66, depth66 := position, tokenIndex, depth - if buffer[position] != rune('c') { - goto l67 - } - position++ - goto l66 - l67: - position, tokenIndex, depth = position66, tokenIndex66, depth66 - if buffer[position] != rune('C') { - goto l16 - } - position++ - } - l66: - { - position68, tokenIndex68, depth68 := position, tokenIndex, depth - if buffer[position] != rune('o') { - goto l69 - } - position++ - goto l68 - l69: - position, tokenIndex, depth = position68, tokenIndex68, depth68 - if buffer[position] != rune('O') { - goto l16 - } - position++ - } - l68: - { - position70, 
tokenIndex70, depth70 := position, tokenIndex, depth - if buffer[position] != rune('n') { - goto l71 - } - position++ - goto l70 - l71: - position, tokenIndex, depth = position70, tokenIndex70, depth70 - if buffer[position] != rune('N') { - goto l16 - } - position++ - } - l70: - { - position72, tokenIndex72, depth72 := position, tokenIndex, depth - if buffer[position] != rune('t') { - goto l73 - } - position++ - goto l72 - l73: - position, tokenIndex, depth = position72, tokenIndex72, depth72 - if buffer[position] != rune('T') { - goto l16 - } - position++ - } - l72: - { - position74, tokenIndex74, depth74 := position, tokenIndex, depth - if buffer[position] != rune('a') { - goto l75 - } - position++ - goto l74 - l75: - position, tokenIndex, depth = position74, tokenIndex74, depth74 - if buffer[position] != rune('A') { - goto l16 - } - position++ - } - l74: - { - position76, tokenIndex76, depth76 := position, tokenIndex, depth - if buffer[position] != rune('i') { - goto l77 - } - position++ - goto l76 - l77: - position, tokenIndex, depth = position76, tokenIndex76, depth76 - if buffer[position] != rune('I') { - goto l16 - } - position++ - } - l76: - { - position78, tokenIndex78, depth78 := position, tokenIndex, depth - if buffer[position] != rune('n') { - goto l79 - } - position++ - goto l78 - l79: - position, tokenIndex, depth = position78, tokenIndex78, depth78 - if buffer[position] != rune('N') { - goto l16 - } - position++ - } - l78: - { - position80, tokenIndex80, depth80 := position, tokenIndex, depth - if buffer[position] != rune('s') { - goto l81 - } - position++ - goto l80 - l81: - position, tokenIndex, depth = position80, tokenIndex80, depth80 - if buffer[position] != rune('S') { - goto l16 - } - position++ - } - l80: - depth-- - add(rulecontains, position65) - } - l82: - { - position83, tokenIndex83, depth83 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l83 - } - position++ - goto l82 - l83: - position, tokenIndex, depth = 
position83, tokenIndex83, depth83 - } - if !_rules[rulevalue]() { - goto l16 - } - break - } - } - - } - l28: - depth-- - add(rulecondition, position17) - } - return true - l16: - position, tokenIndex, depth = position16, tokenIndex16, depth16 - return false - }, - /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('\'') '\'') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ - nil, - /* 3 value <- <<('\'' (!('"' / '\'') .)* '\'')>> */ - func() bool { - position85, tokenIndex85, depth85 := position, tokenIndex, depth - { - position86 := position - depth++ - { - position87 := position - depth++ - if buffer[position] != rune('\'') { - goto l85 - } - position++ - l88: - { - position89, tokenIndex89, depth89 := position, tokenIndex, depth - { - position90, tokenIndex90, depth90 := position, tokenIndex, depth - { - position91, tokenIndex91, depth91 := position, tokenIndex, depth - if buffer[position] != rune('"') { - goto l92 - } - position++ - goto l91 - l92: - position, tokenIndex, depth = position91, tokenIndex91, depth91 - if buffer[position] != rune('\'') { - goto l90 - } - position++ - } - l91: - goto l89 - l90: - position, tokenIndex, depth = position90, tokenIndex90, depth90 - } - if !matchDot() { - goto l89 - } - goto l88 - l89: - position, tokenIndex, depth = position89, tokenIndex89, depth89 - } - if buffer[position] != rune('\'') { - goto l85 - } - position++ - depth-- - add(rulePegText, position87) - } - depth-- - add(rulevalue, position86) - } - return true - l85: - position, tokenIndex, depth = position85, tokenIndex85, depth85 - return false - }, - /* 4 number <- <<('0' / ([1-9] digit* ('.' 
digit*)?))>> */ - func() bool { - position93, tokenIndex93, depth93 := position, tokenIndex, depth - { - position94 := position - depth++ - { - position95 := position - depth++ - { - position96, tokenIndex96, depth96 := position, tokenIndex, depth - if buffer[position] != rune('0') { - goto l97 - } - position++ - goto l96 - l97: - position, tokenIndex, depth = position96, tokenIndex96, depth96 - if c := buffer[position]; c < rune('1') || c > rune('9') { - goto l93 - } - position++ - l98: - { - position99, tokenIndex99, depth99 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l99 - } - goto l98 - l99: - position, tokenIndex, depth = position99, tokenIndex99, depth99 - } - { - position100, tokenIndex100, depth100 := position, tokenIndex, depth - if buffer[position] != rune('.') { - goto l100 - } - position++ - l102: - { - position103, tokenIndex103, depth103 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l103 - } - goto l102 - l103: - position, tokenIndex, depth = position103, tokenIndex103, depth103 - } - goto l101 - l100: - position, tokenIndex, depth = position100, tokenIndex100, depth100 - } - l101: - } - l96: - depth-- - add(rulePegText, position95) - } - depth-- - add(rulenumber, position94) - } - return true - l93: - position, tokenIndex, depth = position93, tokenIndex93, depth93 - return false - }, - /* 5 digit <- <[0-9]> */ - func() bool { - position104, tokenIndex104, depth104 := position, tokenIndex, depth - { - position105 := position - depth++ - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l104 - } - position++ - depth-- - add(ruledigit, position105) - } - return true - l104: - position, tokenIndex, depth = position104, tokenIndex104, depth104 - return false - }, - /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ - func() bool { - position106, 
tokenIndex106, depth106 := position, tokenIndex, depth - { - position107 := position - depth++ - { - position108, tokenIndex108, depth108 := position, tokenIndex, depth - if buffer[position] != rune('t') { - goto l109 - } - position++ - goto l108 - l109: - position, tokenIndex, depth = position108, tokenIndex108, depth108 - if buffer[position] != rune('T') { - goto l106 - } - position++ - } - l108: - { - position110, tokenIndex110, depth110 := position, tokenIndex, depth - if buffer[position] != rune('i') { - goto l111 - } - position++ - goto l110 - l111: - position, tokenIndex, depth = position110, tokenIndex110, depth110 - if buffer[position] != rune('I') { - goto l106 - } - position++ - } - l110: - { - position112, tokenIndex112, depth112 := position, tokenIndex, depth - if buffer[position] != rune('m') { - goto l113 - } - position++ - goto l112 - l113: - position, tokenIndex, depth = position112, tokenIndex112, depth112 - if buffer[position] != rune('M') { - goto l106 - } - position++ - } - l112: - { - position114, tokenIndex114, depth114 := position, tokenIndex, depth - if buffer[position] != rune('e') { - goto l115 - } - position++ - goto l114 - l115: - position, tokenIndex, depth = position114, tokenIndex114, depth114 - if buffer[position] != rune('E') { - goto l106 - } - position++ - } - l114: - if buffer[position] != rune(' ') { - goto l106 - } - position++ - { - position116 := position - depth++ - if !_rules[ruleyear]() { - goto l106 - } - if buffer[position] != rune('-') { - goto l106 - } - position++ - if !_rules[rulemonth]() { - goto l106 - } - if buffer[position] != rune('-') { - goto l106 - } - position++ - if !_rules[ruleday]() { - goto l106 - } - if buffer[position] != rune('T') { - goto l106 - } - position++ - if !_rules[ruledigit]() { - goto l106 - } - if !_rules[ruledigit]() { - goto l106 - } - if buffer[position] != rune(':') { - goto l106 - } - position++ - if !_rules[ruledigit]() { - goto l106 - } - if !_rules[ruledigit]() { - goto l106 - } - 
if buffer[position] != rune(':') { - goto l106 - } - position++ - if !_rules[ruledigit]() { - goto l106 - } - if !_rules[ruledigit]() { - goto l106 - } - { - position117, tokenIndex117, depth117 := position, tokenIndex, depth - { - position119, tokenIndex119, depth119 := position, tokenIndex, depth - if buffer[position] != rune('-') { - goto l120 - } - position++ - goto l119 - l120: - position, tokenIndex, depth = position119, tokenIndex119, depth119 - if buffer[position] != rune('+') { - goto l118 - } - position++ - } - l119: - if !_rules[ruledigit]() { - goto l118 - } - if !_rules[ruledigit]() { - goto l118 - } - if buffer[position] != rune(':') { - goto l118 - } - position++ - if !_rules[ruledigit]() { - goto l118 - } - if !_rules[ruledigit]() { - goto l118 - } - goto l117 - l118: - position, tokenIndex, depth = position117, tokenIndex117, depth117 - if buffer[position] != rune('Z') { - goto l106 - } - position++ - } - l117: - depth-- - add(rulePegText, position116) - } - depth-- - add(ruletime, position107) - } - return true - l106: - position, tokenIndex, depth = position106, tokenIndex106, depth106 - return false - }, - /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ - func() bool { - position121, tokenIndex121, depth121 := position, tokenIndex, depth - { - position122 := position - depth++ - { - position123, tokenIndex123, depth123 := position, tokenIndex, depth - if buffer[position] != rune('d') { - goto l124 - } - position++ - goto l123 - l124: - position, tokenIndex, depth = position123, tokenIndex123, depth123 - if buffer[position] != rune('D') { - goto l121 - } - position++ - } - l123: - { - position125, tokenIndex125, depth125 := position, tokenIndex, depth - if buffer[position] != rune('a') { - goto l126 - } - position++ - goto l125 - l126: - position, tokenIndex, depth = position125, tokenIndex125, depth125 - if buffer[position] != rune('A') { - goto l121 - } - position++ - } - l125: - { - 
position127, tokenIndex127, depth127 := position, tokenIndex, depth - if buffer[position] != rune('t') { - goto l128 - } - position++ - goto l127 - l128: - position, tokenIndex, depth = position127, tokenIndex127, depth127 - if buffer[position] != rune('T') { - goto l121 - } - position++ - } - l127: - { - position129, tokenIndex129, depth129 := position, tokenIndex, depth - if buffer[position] != rune('e') { - goto l130 - } - position++ - goto l129 - l130: - position, tokenIndex, depth = position129, tokenIndex129, depth129 - if buffer[position] != rune('E') { - goto l121 - } - position++ - } - l129: - if buffer[position] != rune(' ') { - goto l121 - } - position++ - { - position131 := position - depth++ - if !_rules[ruleyear]() { - goto l121 - } - if buffer[position] != rune('-') { - goto l121 - } - position++ - if !_rules[rulemonth]() { - goto l121 - } - if buffer[position] != rune('-') { - goto l121 - } - position++ - if !_rules[ruleday]() { - goto l121 - } - depth-- - add(rulePegText, position131) - } - depth-- - add(ruledate, position122) - } - return true - l121: - position, tokenIndex, depth = position121, tokenIndex121, depth121 - return false - }, - /* 8 year <- <(('1' / '2') digit digit digit)> */ - func() bool { - position132, tokenIndex132, depth132 := position, tokenIndex, depth - { - position133 := position - depth++ - { - position134, tokenIndex134, depth134 := position, tokenIndex, depth - if buffer[position] != rune('1') { - goto l135 - } - position++ - goto l134 - l135: - position, tokenIndex, depth = position134, tokenIndex134, depth134 - if buffer[position] != rune('2') { - goto l132 - } - position++ - } - l134: - if !_rules[ruledigit]() { - goto l132 - } - if !_rules[ruledigit]() { - goto l132 - } - if !_rules[ruledigit]() { - goto l132 - } - depth-- - add(ruleyear, position133) - } - return true - l132: - position, tokenIndex, depth = position132, tokenIndex132, depth132 - return false - }, - /* 9 month <- <(('0' / '1') digit)> */ - func() 
bool { - position136, tokenIndex136, depth136 := position, tokenIndex, depth - { - position137 := position - depth++ - { - position138, tokenIndex138, depth138 := position, tokenIndex, depth - if buffer[position] != rune('0') { - goto l139 - } - position++ - goto l138 - l139: - position, tokenIndex, depth = position138, tokenIndex138, depth138 - if buffer[position] != rune('1') { - goto l136 - } - position++ - } - l138: - if !_rules[ruledigit]() { - goto l136 - } - depth-- - add(rulemonth, position137) - } - return true - l136: - position, tokenIndex, depth = position136, tokenIndex136, depth136 - return false - }, - /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ - func() bool { - position140, tokenIndex140, depth140 := position, tokenIndex, depth - { - position141 := position - depth++ - { - switch buffer[position] { - case '3': - if buffer[position] != rune('3') { - goto l140 - } - position++ - break - case '2': - if buffer[position] != rune('2') { - goto l140 - } - position++ - break - case '1': - if buffer[position] != rune('1') { - goto l140 - } - position++ - break - default: - if buffer[position] != rune('0') { - goto l140 - } - position++ - break - } - } - - if !_rules[ruledigit]() { - goto l140 - } - depth-- - add(ruleday, position141) - } - return true - l140: - position, tokenIndex, depth = position140, tokenIndex140, depth140 - return false - }, - /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ - nil, - /* 12 equal <- <'='> */ - nil, - /* 13 contains <- <(('c' / 'C') ('o' / 'O') ('n' / 'N') ('t' / 'T') ('a' / 'A') ('i' / 'I') ('n' / 'N') ('s' / 'S'))> */ - nil, - /* 14 exists <- <(('e' / 'E') ('x' / 'X') ('i' / 'I') ('s' / 'S') ('t' / 'T') ('s' / 'S'))> */ - nil, - /* 15 le <- <('<' '=')> */ - nil, - /* 16 ge <- <('>' '=')> */ - nil, - /* 17 l <- <'<'> */ - nil, - /* 18 g <- <'>'> */ - nil, - nil, - } - p.rules = _rules -} diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go 
deleted file mode 100644 index 87f61aafe4..0000000000 --- a/libs/pubsub/query/query_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package query_test - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func expandEvents(flattenedEvents map[string][]string) []abci.Event { - events := make([]abci.Event, len(flattenedEvents)) - - for composite, values := range flattenedEvents { - tokens := strings.Split(composite, ".") - - attrs := make([]abci.EventAttribute, len(values)) - for i, v := range values { - attrs[i] = abci.EventAttribute{ - Key: tokens[len(tokens)-1], - Value: v, - } - } - - events = append(events, abci.Event{ - Type: strings.Join(tokens[:len(tokens)-1], "."), - Attributes: attrs, - }) - } - - return events -} - -func TestMatches(t *testing.T) { - var ( - txDate = "2017-01-01" - txTime = "2018-05-03T14:45:00Z" - ) - - testCases := []struct { - s string - events map[string][]string - err bool - matches bool - matchErr bool - }{ - {"tm.events.type='NewBlock'", map[string][]string{"tm.events.type": {"NewBlock"}}, false, true, false}, - {"tx.gas > 7", map[string][]string{"tx.gas": {"8"}}, false, true, false}, - {"transfer.amount > 7", map[string][]string{"transfer.amount": {"8stake"}}, false, true, false}, - {"transfer.amount > 7", map[string][]string{"transfer.amount": {"8.045stake"}}, false, true, false}, - {"transfer.amount > 7.043", map[string][]string{"transfer.amount": {"8.045stake"}}, false, true, false}, - {"transfer.amount > 8.045", map[string][]string{"transfer.amount": {"8.045stake"}}, false, false, false}, - {"tx.gas > 7 AND tx.gas < 9", map[string][]string{"tx.gas": {"8"}}, false, true, false}, - {"body.weight >= 3.5", map[string][]string{"body.weight": {"3.5"}}, false, true, false}, - {"account.balance < 1000.0", map[string][]string{"account.balance": {"900"}}, false, true, false}, - {"apples.kg <= 
4", map[string][]string{"apples.kg": {"4.0"}}, false, true, false}, - {"body.weight >= 4.5", map[string][]string{"body.weight": {fmt.Sprintf("%v", float32(4.5))}}, false, true, false}, - { - "oranges.kg < 4 AND watermellons.kg > 10", - map[string][]string{"oranges.kg": {"3"}, "watermellons.kg": {"12"}}, - false, - true, - false, - }, - {"peaches.kg < 4", map[string][]string{"peaches.kg": {"5"}}, false, false, false}, - { - "tx.date > DATE 2017-01-01", - map[string][]string{"tx.date": {time.Now().Format(query.DateLayout)}}, - false, - true, - false, - }, - {"tx.date = DATE 2017-01-01", map[string][]string{"tx.date": {txDate}}, false, true, false}, - {"tx.date = DATE 2018-01-01", map[string][]string{"tx.date": {txDate}}, false, false, false}, - { - "tx.time >= TIME 2013-05-03T14:45:00Z", - map[string][]string{"tx.time": {time.Now().Format(query.TimeLayout)}}, - false, - true, - false, - }, - {"tx.time = TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {txTime}}, false, false, false}, - {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Igor,Ivan"}}, false, true, false}, - {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Pavel,Ivan"}}, false, false, false}, - {"abci.owner.name = 'Igor'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true, false}, - { - "abci.owner.name = 'Ivan'", - map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, - false, - true, - false, - }, - { - "abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'", - map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, - false, - true, - false, - }, - { - "abci.owner.name = 'Ivan' AND abci.owner.name = 'John'", - map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, - false, - false, - false, - }, - { - "tm.events.type='NewBlock'", - map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, - false, - true, - false, - }, - { - "app.name = 'fuzzed'", - map[string][]string{"tm.events.type": 
{"NewBlock"}, "app.name": {"fuzzed"}}, - false, - true, - false, - }, - { - "tm.events.type='NewBlock' AND app.name = 'fuzzed'", - map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, - false, - true, - false, - }, - { - "tm.events.type='NewHeader' AND app.name = 'fuzzed'", - map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, - false, - false, - false, - }, - {"slash EXISTS", - map[string][]string{"slash.reason": {"missing_signature"}, "slash.power": {"6000"}}, - false, - true, - false, - }, - {"sl EXISTS", - map[string][]string{"slash.reason": {"missing_signature"}, "slash.power": {"6000"}}, - false, - true, - false, - }, - {"slash EXISTS", - map[string][]string{"transfer.recipient": {"cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz"}, - "transfer.sender": {"cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5"}}, - false, - false, - false, - }, - {"slash.reason EXISTS AND slash.power > 1000", - map[string][]string{"slash.reason": {"missing_signature"}, "slash.power": {"6000"}}, - false, - true, - false, - }, - {"slash.reason EXISTS AND slash.power > 1000", - map[string][]string{"slash.reason": {"missing_signature"}, "slash.power": {"500"}}, - false, - false, - false, - }, - {"slash.reason EXISTS", - map[string][]string{"transfer.recipient": {"cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz"}, - "transfer.sender": {"cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5"}}, - false, - false, - false, - }, - } - - for _, tc := range testCases { - q, err := query.New(tc.s) - if !tc.err { - require.Nil(t, err) - } - require.NotNil(t, q, "Query '%s' should not be nil", tc.s) - - rawEvents := expandEvents(tc.events) - - if tc.matches { - match, err := q.Matches(rawEvents) - require.Nil(t, err, "Query '%s' should not error on match %v", tc.s, tc.events) - require.True(t, match, "Query '%s' should match %v", tc.s, tc.events) - } else { - match, err := q.Matches(rawEvents) - require.Equal(t, tc.matchErr, err != nil, "Unexpected error for 
query '%s' match %v", tc.s, tc.events) - require.False(t, match, "Query '%s' should not match %v", tc.s, tc.events) - } - } -} - -func TestMustParse(t *testing.T) { - require.Panics(t, func() { query.MustParse("=") }) - require.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) -} - -func TestConditions(t *testing.T) { - txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") - require.NoError(t, err) - - testCases := []struct { - s string - conditions []query.Condition - }{ - { - s: "tm.events.type='NewBlock'", - conditions: []query.Condition{ - {CompositeKey: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}, - }, - }, - { - s: "tx.gas > 7 AND tx.gas < 9", - conditions: []query.Condition{ - {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, - {CompositeKey: "tx.gas", Op: query.OpLess, Operand: int64(9)}, - }, - }, - { - s: "tx.time >= TIME 2013-05-03T14:45:00Z", - conditions: []query.Condition{ - {CompositeKey: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}, - }, - }, - { - s: "slashing EXISTS", - conditions: []query.Condition{ - {CompositeKey: "slashing", Op: query.OpExists}, - }, - }, - } - - for _, tc := range testCases { - q, err := query.New(tc.s) - require.Nil(t, err) - - c, err := q.Conditions() - require.NoError(t, err) - require.Equal(t, tc.conditions, c) - } -} diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go deleted file mode 100644 index 40b84711e8..0000000000 --- a/libs/pubsub/subscription.go +++ /dev/null @@ -1,112 +0,0 @@ -package pubsub - -import ( - "errors" - "fmt" - - "github.com/google/uuid" - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -var ( - // ErrUnsubscribed is returned by Err when a client unsubscribes. - ErrUnsubscribed = errors.New("client unsubscribed") - - // ErrOutOfCapacity is returned by Err when a client is not pulling messages - // fast enough. 
Note the client's subscription will be terminated. - ErrOutOfCapacity = errors.New("client is not pulling messages fast enough") -) - -// A Subscription represents a client subscription for a particular query and -// consists of three things: -// 1) channel onto which messages and events are published -// 2) channel which is closed if a client is too slow or choose to unsubscribe -// 3) err indicating the reason for (2) -type Subscription struct { - id string - out chan Message - - canceled chan struct{} - mtx tmsync.RWMutex - err error -} - -// NewSubscription returns a new subscription with the given outCapacity. -func NewSubscription(outCapacity int) *Subscription { - return &Subscription{ - id: uuid.NewString(), - out: make(chan Message, outCapacity), - canceled: make(chan struct{}), - } -} - -// Out returns a channel onto which messages and events are published. -// Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from -// receiving a nil message. -func (s *Subscription) Out() <-chan Message { - return s.out -} - -func (s *Subscription) ID() string { return s.id } - -// Canceled returns a channel that's closed when the subscription is -// terminated and supposed to be used in a select statement. -func (s *Subscription) Canceled() <-chan struct{} { - return s.canceled -} - -// Err returns nil if the channel returned by Canceled is not yet closed. -// If the channel is closed, Err returns a non-nil error explaining why: -// - ErrUnsubscribed if the subscriber choose to unsubscribe, -// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough -// and the channel returned by Out became full, -// After Err returns a non-nil error, successive calls to Err return the same -// error. 
-func (s *Subscription) Err() error { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.err -} - -func (s *Subscription) cancel(err error) { - s.mtx.Lock() - defer s.mtx.Unlock() - defer func() { - perr := recover() - if err == nil && perr != nil { - err = fmt.Errorf("problem closing subscription: %v", perr) - } - }() - - if s.err == nil && err != nil { - s.err = err - } - - close(s.canceled) -} - -// Message glues data and events together. -type Message struct { - subID string - data interface{} - events []types.Event -} - -func NewMessage(subID string, data interface{}, events []types.Event) Message { - return Message{ - subID: subID, - data: data, - events: events, - } -} - -// SubscriptionID returns the unique identifier for the subscription -// that produced this message. -func (msg Message) SubscriptionID() string { return msg.subID } - -// Data returns an original data published. -func (msg Message) Data() interface{} { return msg.data } - -// Events returns events, which matched the client's query. -func (msg Message) Events() []types.Event { return msg.events } diff --git a/libs/rand/random.go b/libs/rand/random.go index ee400e1958..6b486a7fdf 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -1,9 +1,6 @@ package rand import ( - crand "crypto/rand" - "encoding/binary" - "fmt" mrand "math/rand" ) @@ -11,48 +8,22 @@ const ( strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters ) -func init() { - Reseed() -} - -// NewRand returns a prng, that is seeded with OS randomness. -// The OS randomness is obtained from crypto/rand, however, like with any math/rand.Rand -// object none of the provided methods are suitable for cryptographic usage. -// -// Note that the returned instance of math/rand's Rand is not -// suitable for concurrent use by multiple goroutines. -// -// For concurrent use, call Reseed to reseed math/rand's default source and -// use math/rand's top-level convenience functions instead. 
-func NewRand() *mrand.Rand { - seed := crandSeed() - // nolint:gosec // G404: Use of weak random number generator - return mrand.New(mrand.NewSource(seed)) -} - -// Reseed conveniently re-seeds the default Source of math/rand with -// randomness obtained from crypto/rand. -// -// Note that this does not make math/rand suitable for cryptographic usage. -// -// Use math/rand's top-level convenience functions remain suitable -// for concurrent use by multiple goroutines. -func Reseed() { - seed := crandSeed() - mrand.Seed(seed) -} - // Str constructs a random alphanumeric string of given length // from math/rand's global default Source. -func Str(length int) string { +func Str(length int) string { return buildString(length, mrand.Int63) } + +// StrFromSource produces a random string of a specified length from +// the specified random source. +func StrFromSource(r *mrand.Rand, length int) string { return buildString(length, r.Int63) } + +func buildString(length int, picker func() int64) string { if length <= 0 { return "" } chars := make([]byte, 0, length) for { - // nolint:gosec // G404: Use of weak random number generator - val := mrand.Int63() + val := picker() for i := 0; i < 10; i++ { v := int(val & 0x3f) // rightmost 6 bits if v >= 62 { // only 62 characters in strChars @@ -78,12 +49,3 @@ func Bytes(n int) []byte { } return bs } - -func crandSeed() int64 { - var seed int64 - err := binary.Read(crand.Reader, binary.BigEndian, &seed) - if err != nil { - panic(fmt.Sprintf("could nor read random seed from crypto/rand: %v", err)) - } - return seed -} diff --git a/libs/service/service.go b/libs/service/service.go index 0af2439950..b36aa10876 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -1,8 +1,8 @@ package service import ( + "context" "errors" - "fmt" "sync/atomic" "github.com/tendermint/tendermint/libs/log" @@ -22,39 +22,33 @@ var ( // Service defines a service that can be started, stopped, and reset. 
type Service interface { - // Start the service. - // If it's already started or stopped, will return an error. - // If OnStart() returns an error, it's returned by Start() - Start() error - OnStart() error - - // Stop the service. - // If it's already stopped, will return an error. - // OnStop must never error. - Stop() error - OnStop() - - // Reset the service. - // Panics by default - must be overwritten to enable reset. - Reset() error - OnReset() error + // Start is called to start the service, which should run until + // the context terminates. If the service is already running, Start + // must report an error. + Start(context.Context) error // Return true if the service is running IsRunning() bool - // Quit returns a channel, which is closed once service is stopped. - Quit() <-chan struct{} - // String representation of the service String() string - // SetLogger sets a logger. - SetLogger(log.Logger) - // Wait blocks until the service is stopped. Wait() } +// Implementation describes the implementation that the +// BaseService implementation wraps. +type Implementation interface { + Service + + // Called by the Services Start Method + OnStart(context.Context) error + + // Called when the service's context is canceled. + OnStop() +} + /* Classical-inheritance-style service declarations. Services can be started, then stopped, then optionally restarted. @@ -85,7 +79,7 @@ Typical usage: return fs } - func (fs *FooService) OnStart() error { + func (fs *FooService) OnStart(ctx context.Context) error { fs.BaseService.OnStart() // Always call the overridden method. // initialize private fields // start subroutines, etc. @@ -98,111 +92,99 @@ Typical usage: } */ type BaseService struct { - Logger log.Logger + logger log.Logger name string started uint32 // atomic stopped uint32 // atomic quit chan struct{} // The "subclass" of BaseService - impl Service + impl Implementation } // NewBaseService creates a new BaseService. 
-func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { - if logger == nil { - logger = log.NewNopLogger() - } - +func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseService { return &BaseService{ - Logger: logger, + logger: logger, name: name, quit: make(chan struct{}), impl: impl, } } -// SetLogger implements Service by setting a logger. -func (bs *BaseService) SetLogger(l log.Logger) { - bs.Logger = l -} - -// Start implements Service by calling OnStart (if defined). An error will be -// returned if the service is already running or stopped. Not to start the -// stopped service, you need to call Reset. -func (bs *BaseService) Start() error { +// Start starts the Service and calls its OnStart method. An error will be +// returned if the service is already running or stopped. To restart a +// stopped service, call Reset. +func (bs *BaseService) Start(ctx context.Context) error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { - bs.Logger.Error("not starting service; already stopped", "service", bs.name, "impl", bs.impl.String()) + bs.logger.Error("not starting service; already stopped", "service", bs.name, "impl", bs.impl.String()) atomic.StoreUint32(&bs.started, 0) return ErrAlreadyStopped } - bs.Logger.Info("starting service", "service", bs.name, "impl", bs.impl.String()) + bs.logger.Info("starting service", "service", bs.name, "impl", bs.impl.String()) - if err := bs.impl.OnStart(); err != nil { + if err := bs.impl.OnStart(ctx); err != nil { // revert flag atomic.StoreUint32(&bs.started, 0) return err } + + go func(ctx context.Context) { + select { + case <-bs.quit: + // someone else explicitly called stop + // and then we shouldn't. + return + case <-ctx.Done(): + // if nothing is running, no need to + // shut down again. + if !bs.impl.IsRunning() { + return + } + + // the context was cancel and we + // should stop. 
+ if err := bs.Stop(); err != nil { + bs.logger.Error("stopped service", + "err", err.Error(), + "service", bs.name, + "impl", bs.impl.String()) + } + + bs.logger.Info("stopped service", + "service", bs.name, + "impl", bs.impl.String()) + } + }(ctx) + return nil } - bs.Logger.Debug("not starting service; already started", "service", bs.name, "impl", bs.impl.String()) return ErrAlreadyStarted } -// OnStart implements Service by doing nothing. -// NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStart() -func (bs *BaseService) OnStart() error { return nil } - // Stop implements Service by calling OnStop (if defined) and closing quit // channel. An error will be returned if the service is already stopped. func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { if atomic.LoadUint32(&bs.started) == 0 { - bs.Logger.Error("not stopping service; not started yet", "service", bs.name, "impl", bs.impl.String()) + bs.logger.Error("not stopping service; not started yet", "service", bs.name, "impl", bs.impl.String()) atomic.StoreUint32(&bs.stopped, 0) return ErrNotStarted } - bs.Logger.Info("stopping service", "service", bs.name, "impl", bs.impl.String()) + bs.logger.Info("stopping service", "service", bs.name, "impl", bs.impl.String()) bs.impl.OnStop() close(bs.quit) return nil } - bs.Logger.Debug("not stopping service; already stopped", "service", bs.name, "impl", bs.impl.String()) return ErrAlreadyStopped } -// OnStop implements Service by doing nothing. -// NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStop() -func (bs *BaseService) OnStop() {} - -// Reset implements Service by calling OnReset callback (if defined). An error -// will be returned if the service is running. 
-func (bs *BaseService) Reset() error { - if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug("cannot reset service; not stopped", "service", bs.name, "impl", bs.impl.String()) - return fmt.Errorf("can't reset running %s", bs.name) - } - - // whether or not we've started, we can reset - atomic.CompareAndSwapUint32(&bs.started, 1, 0) - - bs.quit = make(chan struct{}) - return bs.impl.OnReset() -} - -// OnReset implements Service by panicking. -func (bs *BaseService) OnReset() error { - panic("The service cannot be reset") -} - // IsRunning implements Service by returning true or false depending on the // service's state. func (bs *BaseService) IsRunning() bool { @@ -210,16 +192,7 @@ func (bs *BaseService) IsRunning() bool { } // Wait blocks until the service is stopped. -func (bs *BaseService) Wait() { - <-bs.quit -} +func (bs *BaseService) Wait() { <-bs.quit } // String implements Service by returning a string representation of the service. -func (bs *BaseService) String() string { - return bs.name -} - -// Quit Implements Service by returning a quit channel. 
-func (bs *BaseService) Quit() <-chan struct{} { - return bs.quit -} +func (bs *BaseService) String() string { return bs.name } diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 7abc6f4fba..fcc727fcc4 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -1,24 +1,32 @@ package service import ( + "context" "testing" "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) type testService struct { BaseService } -func (testService) OnReset() error { +func (testService) OnStop() {} +func (testService) OnStart(context.Context) error { return nil } func TestBaseServiceWait(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + ts := &testService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) - err := ts.Start() + ts.BaseService = *NewBaseService(logger, "TestService", ts) + err := ts.Start(ctx) require.NoError(t, err) waitFinished := make(chan struct{}) @@ -27,7 +35,7 @@ func TestBaseServiceWait(t *testing.T) { waitFinished <- struct{}{} }() - go ts.Stop() //nolint:errcheck // ignore for tests + go cancel() select { case <-waitFinished: @@ -36,22 +44,3 @@ func TestBaseServiceWait(t *testing.T) { t.Fatal("expected Wait() to finish within 100 ms.") } } - -func TestBaseServiceReset(t *testing.T) { - ts := &testService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) - err := ts.Start() - require.NoError(t, err) - - err = ts.Reset() - require.Error(t, err, "expected cant reset service error") - - err = ts.Stop() - require.NoError(t, err) - - err = ts.Reset() - require.NoError(t, err) - - err = ts.Start() - require.NoError(t, err) -} diff --git a/libs/strings/string.go b/libs/strings/string.go index b09c00063a..6cc0b18ee5 100644 --- a/libs/strings/string.go +++ b/libs/strings/string.go @@ -55,6 +55,10 @@ func SplitAndTrim(s, sep, cutset string) []string { return spl } +// TrimSpace 
removes all leading and trailing whitespace from the +// string. +func TrimSpace(s string) string { return strings.TrimSpace(s) } + // Returns true if s is a non-empty printable non-tab ascii character. func IsASCIIText(s string) bool { if len(s) == 0 { diff --git a/libs/sync/atomic_bool.go b/libs/sync/atomic_bool.go deleted file mode 100644 index 1a530b5968..0000000000 --- a/libs/sync/atomic_bool.go +++ /dev/null @@ -1,33 +0,0 @@ -package sync - -import "sync/atomic" - -// AtomicBool is an atomic Boolean. -// Its methods are all atomic, thus safe to be called by multiple goroutines simultaneously. -// Note: When embedding into a struct one should always use *AtomicBool to avoid copy. -// it's a simple implmentation from https://github.com/tevino/abool -type AtomicBool int32 - -// NewBool creates an AtomicBool with given default value. -func NewBool(ok bool) *AtomicBool { - ab := new(AtomicBool) - if ok { - ab.Set() - } - return ab -} - -// Set sets the Boolean to true. -func (ab *AtomicBool) Set() { - atomic.StoreInt32((*int32)(ab), 1) -} - -// UnSet sets the Boolean to false. -func (ab *AtomicBool) UnSet() { - atomic.StoreInt32((*int32)(ab), 0) -} - -// IsSet returns whether the Boolean is true. 
-func (ab *AtomicBool) IsSet() bool { - return atomic.LoadInt32((*int32)(ab))&1 == 1 -} diff --git a/libs/sync/atomic_bool_test.go b/libs/sync/atomic_bool_test.go deleted file mode 100644 index 9531815e8e..0000000000 --- a/libs/sync/atomic_bool_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package sync - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDefaultValue(t *testing.T) { - t.Parallel() - v := NewBool(false) - assert.False(t, v.IsSet()) - - v = NewBool(true) - assert.True(t, v.IsSet()) -} - -func TestSetUnSet(t *testing.T) { - t.Parallel() - v := NewBool(false) - - v.Set() - assert.True(t, v.IsSet()) - - v.UnSet() - assert.False(t, v.IsSet()) -} diff --git a/libs/time/mocks/source.go b/libs/time/mocks/source.go new file mode 100644 index 0000000000..a8e49b314e --- /dev/null +++ b/libs/time/mocks/source.go @@ -0,0 +1,28 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// Source is an autogenerated mock type for the Source type +type Source struct { + mock.Mock +} + +// Now provides a mock function with given fields: +func (_m *Source) Now() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} diff --git a/libs/time/time.go b/libs/time/time.go index 786f9bbb42..7ab45d8f14 100644 --- a/libs/time/time.go +++ b/libs/time/time.go @@ -15,3 +15,17 @@ func Now() time.Time { func Canonical(t time.Time) time.Time { return t.Round(0).UTC() } + +//go:generate ../../scripts/mockery_generate.sh Source + +// Source is an interface that defines a way to fetch the current time. +type Source interface { + Now() time.Time +} + +// DefaultSource implements the Source interface using the system clock provided by the standard library. 
+type DefaultSource struct{} + +func (DefaultSource) Now() time.Time { + return Now() +} diff --git a/light/client.go b/light/client.go index cc606f496e..443b4d8224 100644 --- a/light/client.go +++ b/light/client.go @@ -9,11 +9,11 @@ import ( "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/light/provider" "github.com/tendermint/tendermint/light/store" + "github.com/tendermint/tendermint/types" ) @@ -134,7 +134,7 @@ type Client struct { providerTimeout time.Duration // Mutex for locking during changes of the light clients providers - providerMutex tmsync.Mutex + providerMutex sync.Mutex // Primary provider of new headers. primary provider.Provider // Providers used to "witness" new headers. @@ -168,7 +168,8 @@ func NewClient( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - options ...Option) (*Client, error) { + options ...Option, +) (*Client, error) { // Check whether the trusted store already has a trusted block. If so, then create // a new client from the trusted store instead of the trust options. 
@@ -1023,7 +1024,11 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) defer wg.Done() lb, err := c.witnesses[witnessIndex].LightBlock(subctx, height) - witnessResponsesC <- witnessResponse{lb, witnessIndex, err} + select { + case witnessResponsesC <- witnessResponse{lb, witnessIndex, err}: + case <-ctx.Done(): + } + }(index, witnessResponsesC) } @@ -1142,3 +1147,29 @@ func (c *Client) providerShouldBeRemoved(err error) bool { errors.As(err, &provider.ErrBadLightBlock{}) || errors.Is(err, provider.ErrConnectionClosed) } + +func (c *Client) Status(ctx context.Context) *types.LightClientInfo { + chunks := make([]string, len(c.witnesses)) + + // If primary is in witness list we do not want to count it twice in the number of peers + primaryNotInWitnessList := 1 + for i, val := range c.witnesses { + chunks[i] = val.ID() + if chunks[i] == c.primary.ID() { + primaryNotInWitnessList = 0 + } + } + + return &types.LightClientInfo{ + PrimaryID: c.primary.ID(), + WitnessesID: chunks, + NumPeers: len(chunks) + primaryNotInWitnessList, + LastTrustedHeight: c.latestTrustedBlock.Height, + LastTrustedHash: c.latestTrustedBlock.Hash(), + LatestBlockTime: c.latestTrustedBlock.Time, + TrustingPeriod: c.trustingPeriod.String(), + // The caller of /status can deduce this from the two variables above + // Having a boolean flag improves readbility + TrustedBlockExpired: HeaderExpired(c.latestTrustedBlock.SignedHeader, c.trustingPeriod, time.Now()), + } +} diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index 04ea6d1fc8..ca0e402a05 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -2,6 +2,7 @@ package light_test import ( "context" + "errors" "testing" "time" @@ -57,16 +58,25 @@ func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) } func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ types.Evidence) error { - panic("not implemented") + return 
errors.New("not implemented") } +// provierBenchmarkImpl does not have an ID iteself. +// Thus we return a sample string +func (impl *providerBenchmarkImpl) ID() string { return "ip-not-defined.com" } + func BenchmarkSequence(b *testing.B) { - headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + headers, vals, _ := genLightBlocksWithKeys(b, chainID, 1000, 100, 1, bTime) benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) - genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1) + genesisBlock, _ := benchmarkFullNode.LightBlock(ctx, 1) + + logger := log.NewTestingLogger(b) c, err := light.NewClient( - context.Background(), + ctx, chainID, light.TrustOptions{ Period: 24 * time.Hour, @@ -76,7 +86,7 @@ func BenchmarkSequence(b *testing.B) { benchmarkFullNode, []provider.Provider{benchmarkFullNode}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), light.SequentialVerification(), ) if err != nil { @@ -85,7 +95,7 @@ func BenchmarkSequence(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyLightBlockAtHeight(context.Background(), 1000, bTime.Add(1000*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(ctx, 1000, bTime.Add(1000*time.Minute)) if err != nil { b.Fatal(err) } @@ -93,9 +103,14 @@ func BenchmarkSequence(b *testing.B) { } func BenchmarkBisection(b *testing.B) { - headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + headers, vals, _ := genLightBlocksWithKeys(b, chainID, 1000, 100, 1, bTime) benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) - genesisBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 1) + genesisBlock, _ := benchmarkFullNode.LightBlock(ctx, 1) + + logger := log.NewTestingLogger(b) c, err := light.NewClient( context.Background(), @@ -108,7 +123,7 
@@ func BenchmarkBisection(b *testing.B) { benchmarkFullNode, []provider.Provider{benchmarkFullNode}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) if err != nil { b.Fatal(err) @@ -116,7 +131,7 @@ func BenchmarkBisection(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyLightBlockAtHeight(context.Background(), 1000, bTime.Add(1000*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(ctx, 1000, bTime.Add(1000*time.Minute)) if err != nil { b.Fatal(err) } @@ -124,12 +139,17 @@ func BenchmarkBisection(b *testing.B) { } func BenchmarkBackwards(b *testing.B) { - headers, vals, _ := genLightBlocksWithKeys(chainID, 1000, 100, 1, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + headers, vals, _ := genLightBlocksWithKeys(b, chainID, 1000, 100, 1, bTime) benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) - trustedBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 0) + trustedBlock, _ := benchmarkFullNode.LightBlock(ctx, 0) + + logger := log.NewTestingLogger(b) c, err := light.NewClient( - context.Background(), + ctx, chainID, light.TrustOptions{ Period: 24 * time.Hour, @@ -139,7 +159,7 @@ func BenchmarkBackwards(b *testing.B) { benchmarkFullNode, []provider.Provider{benchmarkFullNode}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) if err != nil { b.Fatal(err) @@ -147,9 +167,10 @@ func BenchmarkBackwards(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyLightBlockAtHeight(context.Background(), 1, bTime) + _, err = c.VerifyLightBlockAtHeight(ctx, 1, bTime) if err != nil { b.Fatal(err) } } + } diff --git a/light/client_test.go b/light/client_test.go index c7c974ee5b..bc585f7b8a 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -26,1067 +26,1091 @@ const ( chainID = "test" ) -var ( - ctx = context.Background() - keys = genPrivKeys(4) - vals = keys.ToValidators(20, 10) - 
bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") - h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) - // 3/3 signed - h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) - // 3/3 signed - h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) - trustPeriod = 4 * time.Hour - trustOptions = light.TrustOptions{ - Period: 4 * time.Hour, - Height: 1, - Hash: h1.Hash(), - } - valSet = map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: vals, - 4: vals, - } - headerSet = map[int64]*types.SignedHeader{ - 1: h1, - // interim header (3/3 signed) - 2: h2, - // last header (3/3 signed) - 3: h3, - } - l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} - l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} - l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals} -) +var bTime time.Time -func TestValidateTrustOptions(t *testing.T) { - testCases := []struct { - err bool - to light.TrustOptions - }{ - { - false, - trustOptions, - }, - { - true, - light.TrustOptions{ - Period: -1 * time.Hour, - Height: 1, - Hash: h1.Hash(), - }, - }, - { - true, - light.TrustOptions{ - Period: 1 * time.Hour, - Height: 0, - Hash: h1.Hash(), - }, - }, - { - true, - light.TrustOptions{ - Period: 1 * time.Hour, - Height: 1, - Hash: []byte("incorrect hash"), - }, - }, +func init() { + var err error + bTime, err = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + if err != nil { + panic(err) } +} - for _, tc := range testCases { - err := tc.to.ValidateBasic() - if tc.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - } +func TestClient(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + var ( + keys = genPrivKeys(4) + vals = keys.ToValidators(20, 10) + trustPeriod = 4 * time.Hour -} + valSet = map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + 3: vals, + 4: vals, + } -func TestClient_SequentialVerification(t *testing.T) { - newKeys := genPrivKeys(4) - newVals := newKeys.ToValidators(10, 1) - differentVals, _ := factory.RandValidatorSet(10, 100) - - testCases := []struct { - name string - otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet - initErr bool - verifyErr bool - }{ - { - "good", - headerSet, - valSet, - false, - false, - }, - { - "bad: different first header", - map[int64]*types.SignedHeader{ - // different header - 1: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + h1 = keys.GenSignedHeader(t, chainID, 1, bTime, nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) + // 3/3 signed + h2 = keys.GenSignedHeaderLastBlockID(t, chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) + // 3/3 signed + h3 = keys.GenSignedHeaderLastBlockID(t, chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) + trustOptions = light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: h1.Hash(), + } + headerSet = map[int64]*types.SignedHeader{ + 1: h1, + // interim header (3/3 signed) + 2: h2, + // last header (3/3 signed) + 3: h3, + } + l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} + l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} + l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals} + ) + t.Run("ValidateTrustOptions", func(t *testing.T) { + testCases := 
[]struct { + err bool + to light.TrustOptions + }{ + { + false, + trustOptions, }, - map[int64]*types.ValidatorSet{ - 1: vals, + { + true, + light.TrustOptions{ + Period: -1 * time.Hour, + Height: 1, + Hash: h1.Hash(), + }, }, - true, - false, - }, - { - "bad: no first signed header", - map[int64]*types.SignedHeader{}, - map[int64]*types.ValidatorSet{ - 1: differentVals, + { + true, + light.TrustOptions{ + Period: 1 * time.Hour, + Height: 0, + Hash: h1.Hash(), + }, }, - true, - true, - }, - { - "bad: different first validator set", - map[int64]*types.SignedHeader{ - 1: h1, + { + true, + light.TrustOptions{ + Period: 1 * time.Hour, + Height: 1, + Hash: []byte("incorrect hash"), + }, }, - map[int64]*types.ValidatorSet{ - 1: differentVals, + } + + for idx, tc := range testCases { + t.Run(fmt.Sprint(idx), func(t *testing.T) { + err := tc.to.ValidateBasic() + if tc.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } + }) + t.Run("SequentialVerification", func(t *testing.T) { + newKeys := genPrivKeys(4) + newVals := newKeys.ToValidators(10, 1) + differentVals, _ := factory.ValidatorSet(ctx, t, 10, 100) + + testCases := []struct { + name string + otherHeaders map[int64]*types.SignedHeader // all except ^ + vals map[int64]*types.ValidatorSet + initErr bool + verifyErr bool + }{ + { + name: "good", + otherHeaders: headerSet, + vals: valSet, + initErr: false, + verifyErr: false, }, - true, - true, - }, - { - "bad: 1/3 signed interim header", - map[int64]*types.SignedHeader{ - // trusted header - 1: h1, - // interim header (1/3 signed) - 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), - // last header (3/3 signed) - 3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + { + "bad: different first header", + map[int64]*types.SignedHeader{ + 
// different header + 1: keys.GenSignedHeader(t, chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + }, + map[int64]*types.ValidatorSet{ + 1: vals, + }, + true, + false, }, - valSet, - false, - true, - }, - { - "bad: 1/3 signed last header", - map[int64]*types.SignedHeader{ - // trusted header - 1: h1, - // interim header (3/3 signed) - 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), - // last header (1/3 signed) - 3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), + { + "bad: no first signed header", + map[int64]*types.SignedHeader{}, + map[int64]*types.ValidatorSet{ + 1: differentVals, + }, + true, + true, }, - valSet, - false, - true, - }, - { - "bad: different validator set at height 3", - headerSet, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: newVals, + { + "bad: different first validator set", + map[int64]*types.SignedHeader{ + 1: h1, + }, + map[int64]*types.ValidatorSet{ + 1: differentVals, + }, + true, + true, }, - false, - true, - }, - } - - for _, tc := range testCases { - testCase := tc - t.Run(testCase.name, func(t *testing.T) { - mockNode := mockNodeFromHeadersAndVals(testCase.otherHeaders, testCase.vals) - mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockNode, - []provider.Provider{mockNode}, - dbs.New(dbm.NewMemDB()), - light.SequentialVerification(), - light.Logger(log.TestingLogger()), - ) - - if testCase.initErr { - require.Error(t, err) - return - } - - require.NoError(t, err) - - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) - if testCase.verifyErr { - assert.Error(t, err) - } else { - 
assert.NoError(t, err) - } - mockNode.AssertExpectations(t) - }) - } -} - -func TestClient_SkippingVerification(t *testing.T) { - // required for 2nd test case - newKeys := genPrivKeys(4) - newVals := newKeys.ToValidators(10, 1) - - // 1/3+ of vals, 2/3- of newVals - transitKeys := keys.Extend(3) - transitVals := transitKeys.ToValidators(10, 1) - - testCases := []struct { - name string - otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet - initErr bool - verifyErr bool - }{ - { - "good", - map[int64]*types.SignedHeader{ - // trusted header - 1: h1, - // last header (3/3 signed) - 3: h3, + { + "bad: 1/3 signed interim header", + map[int64]*types.SignedHeader{ + // trusted header + 1: h1, + // interim header (1/3 signed) + 2: keys.GenSignedHeader(t, chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), + // last header (3/3 signed) + 3: keys.GenSignedHeader(t, chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + }, + valSet, + false, + true, }, - valSet, - false, - false, - }, - { - "good, but val set changes by 2/3 (1/3 of vals is still present)", - map[int64]*types.SignedHeader{ - // trusted header - 1: h1, - 3: transitKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, transitVals, transitVals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(transitKeys)), + { + "bad: 1/3 signed last header", + map[int64]*types.SignedHeader{ + // trusted header + 1: h1, + // interim header (3/3 signed) + 2: keys.GenSignedHeader(t, chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + // last header (1/3 signed) + 3: keys.GenSignedHeader(t, chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, 
len(keys)), + }, + valSet, + false, + true, }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: transitVals, + { + "bad: different validator set at height 3", + headerSet, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + 3: newVals, + }, + false, + true, }, - false, - false, - }, - { - "good, but val set changes 100% at height 2", - map[int64]*types.SignedHeader{ - // trusted header - 1: h1, - // interim header (3/3 signed) - 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), - // last header (0/4 of the original val set signed) - 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), + } + + for _, tc := range testCases { + testCase := tc + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + mockNode := mockNodeFromHeadersAndVals(testCase.otherHeaders, testCase.vals) + mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockNode, + []provider.Provider{mockNode}, + dbs.New(dbm.NewMemDB()), + light.SequentialVerification(), + light.Logger(logger), + ) + + if testCase.initErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) + if testCase.verifyErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + mockNode.AssertExpectations(t) + }) + } + + }) + t.Run("SkippingVerification", func(t *testing.T) { + // required for 2nd test case + newKeys := genPrivKeys(4) + newVals := newKeys.ToValidators(10, 1) + + // 1/3+ of vals, 2/3- of newVals + transitKeys := keys.Extend(3) + transitVals := transitKeys.ToValidators(10, 1) + + 
testCases := []struct { + name string + otherHeaders map[int64]*types.SignedHeader // all except ^ + vals map[int64]*types.ValidatorSet + initErr bool + verifyErr bool + }{ + { + "good", + map[int64]*types.SignedHeader{ + // trusted header + 1: h1, + // last header (3/3 signed) + 3: h3, + }, + valSet, + false, + false, }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: newVals, + { + "good, but val set changes by 2/3 (1/3 of vals is still present)", + map[int64]*types.SignedHeader{ + // trusted header + 1: h1, + 3: transitKeys.GenSignedHeader(t, chainID, 3, bTime.Add(2*time.Hour), nil, transitVals, transitVals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(transitKeys)), + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + 3: transitVals, + }, + false, + false, }, - false, - false, - }, - { - "bad: last header signed by newVals, interim header has no signers", - map[int64]*types.SignedHeader{ - // trusted header - 1: h1, - // last header (0/4 of the original val set signed) - 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, 0), - // last header (0/4 of the original val set signed) - 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), + { + "good, but val set changes 100% at height 2", + map[int64]*types.SignedHeader{ + // trusted header + 1: h1, + // interim header (3/3 signed) + 2: keys.GenSignedHeader(t, chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), + // last header (0/4 of the original val set signed) + 3: newKeys.GenSignedHeader(t, chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + 3: 
newVals, + }, + false, + false, }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: newVals, + { + "bad: last header signed by newVals, interim header has no signers", + map[int64]*types.SignedHeader{ + // trusted header + 1: h1, + // last header (0/4 of the original val set signed) + 2: keys.GenSignedHeader(t, chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, 0), + // last header (0/4 of the original val set signed) + 3: newKeys.GenSignedHeader(t, chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), + }, + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + 3: newVals, + }, + false, + true, }, - false, - true, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - mockNode := mockNodeFromHeadersAndVals(tc.otherHeaders, tc.vals) - mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockNode, - []provider.Provider{mockNode}, - dbs.New(dbm.NewMemDB()), - light.SkippingVerification(light.DefaultTrustLevel), - light.Logger(log.TestingLogger()), - ) - if tc.initErr { - require.Error(t, err) - return - } - - require.NoError(t, err) - - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) - if tc.verifyErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } - -} - -// start from a large light block to make sure that the pivot height doesn't select a height outside -// the appropriate range -func TestClientLargeBisectionVerification(t *testing.T) { - numBlocks := int64(300) - mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, numBlocks, 101, 2, bTime) - - lastBlock := &types.LightBlock{SignedHeader: mockHeaders[numBlocks], ValidatorSet: mockVals[numBlocks]} - mockNode := &provider_mocks.Provider{} - 
mockNode.On("LightBlock", mock.Anything, numBlocks). - Return(lastBlock, nil) + } - mockNode.On("LightBlock", mock.Anything, int64(200)). - Return(&types.LightBlock{SignedHeader: mockHeaders[200], ValidatorSet: mockVals[200]}, nil) + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + logger := log.NewTestingLogger(t) + + mockNode := mockNodeFromHeadersAndVals(tc.otherHeaders, tc.vals) + mockNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockNode, + []provider.Provider{mockNode}, + dbs.New(dbm.NewMemDB()), + light.SkippingVerification(light.DefaultTrustLevel), + light.Logger(logger), + ) + if tc.initErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) + if tc.verifyErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } - mockNode.On("LightBlock", mock.Anything, int64(256)). 
- Return(&types.LightBlock{SignedHeader: mockHeaders[256], ValidatorSet: mockVals[256]}, nil) + }) + t.Run("LargeBisectionVerification", func(t *testing.T) { + // start from a large light block to make sure that the pivot height doesn't select a height outside + // the appropriate range - mockNode.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil) + numBlocks := int64(300) + mockHeaders, mockVals, _ := genLightBlocksWithKeys(t, chainID, numBlocks, 101, 2, bTime) - trustedLightBlock, err := mockNode.LightBlock(ctx, int64(200)) - require.NoError(t, err) - c, err := light.NewClient( - ctx, - chainID, - light.TrustOptions{ - Period: 4 * time.Hour, - Height: trustedLightBlock.Height, - Hash: trustedLightBlock.Hash(), - }, - mockNode, - []provider.Provider{mockNode}, - dbs.New(dbm.NewMemDB()), - light.SkippingVerification(light.DefaultTrustLevel), - ) - require.NoError(t, err) - h, err := c.Update(ctx, bTime.Add(300*time.Minute)) - assert.NoError(t, err) - height, err := c.LastTrustedHeight() - require.NoError(t, err) - require.Equal(t, numBlocks, height) - h2, err := mockNode.LightBlock(ctx, numBlocks) - require.NoError(t, err) - assert.Equal(t, h, h2) - mockNode.AssertExpectations(t) -} - -func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { - mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) - c, err := light.NewClient( - ctx, - chainID, - light.TrustOptions{ - Period: 4 * time.Hour, - Height: 1, - Hash: h1.Hash(), - }, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.SkippingVerification(light.DefaultTrustLevel), - ) - require.NoError(t, err) + lastBlock := &types.LightBlock{SignedHeader: mockHeaders[numBlocks], ValidatorSet: mockVals[numBlocks]} + mockNode := &provider_mocks.Provider{} + mockNode.On("LightBlock", mock.Anything, numBlocks). 
+ Return(lastBlock, nil) - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - require.NoError(t, err) + mockNode.On("LightBlock", mock.Anything, int64(200)). + Return(&types.LightBlock{SignedHeader: mockHeaders[200], ValidatorSet: mockVals[200]}, nil) - // confirm that the client already doesn't have the light block - _, err = c.TrustedLightBlock(2) - require.Error(t, err) + mockNode.On("LightBlock", mock.Anything, int64(256)). + Return(&types.LightBlock{SignedHeader: mockHeaders[256], ValidatorSet: mockVals[256]}, nil) - // verify using bisection the light block between the two trusted light blocks - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) - assert.NoError(t, err) - mockFullNode.AssertExpectations(t) -} + mockNode.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil) -func TestClient_Cleanup(t *testing.T) { - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - _, err = c.TrustedLightBlock(1) - require.NoError(t, err) - - err = c.Cleanup() - require.NoError(t, err) - - // Check no light blocks exist after Cleanup. - l, err := c.TrustedLightBlock(1) - assert.Error(t, err) - assert.Nil(t, l) - mockFullNode.AssertExpectations(t) -} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -// trustedHeader.Height == options.Height -func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { - // 1. 
options.Hash == trustedHeader.Hash - t.Run("hashes should match", func(t *testing.T) { - mockNode := &provider_mocks.Provider{} - trustedStore := dbs.New(dbm.NewMemDB()) - err := trustedStore.SaveLightBlock(l1) + trustedLightBlock, err := mockNode.LightBlock(ctx, int64(200)) require.NoError(t, err) - c, err := light.NewClient( ctx, chainID, - trustOptions, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: trustedLightBlock.Height, + Hash: trustedLightBlock.Hash(), + }, mockNode, []provider.Provider{mockNode}, - trustedStore, - light.Logger(log.TestingLogger()), + dbs.New(dbm.NewMemDB()), + light.SkippingVerification(light.DefaultTrustLevel), ) require.NoError(t, err) - - l, err := c.TrustedLightBlock(1) + h, err := c.Update(ctx, bTime.Add(300*time.Minute)) assert.NoError(t, err) - assert.NotNil(t, l) - assert.Equal(t, l.Hash(), h1.Hash()) - assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) + height, err := c.LastTrustedHeight() + require.NoError(t, err) + require.Equal(t, numBlocks, height) + h2, err := mockNode.LightBlock(ctx, numBlocks) + require.NoError(t, err) + assert.Equal(t, h, h2) mockNode.AssertExpectations(t) }) + t.Run("BisectionBetweenTrustedHeaders", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // 2. 
options.Hash != trustedHeader.Hash - t.Run("hashes should not match", func(t *testing.T) { - trustedStore := dbs.New(dbm.NewMemDB()) - err := trustedStore.SaveLightBlock(l1) - require.NoError(t, err) - - // header1 != h1 - header1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) - mockNode := &provider_mocks.Provider{} - + mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) c, err := light.NewClient( ctx, chainID, light.TrustOptions{ Period: 4 * time.Hour, Height: 1, - Hash: header1.Hash(), + Hash: h1.Hash(), }, - mockNode, - []provider.Provider{mockNode}, - trustedStore, - light.Logger(log.TestingLogger()), + mockFullNode, + []provider.Provider{mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.SkippingVerification(light.DefaultTrustLevel), ) require.NoError(t, err) - l, err := c.TrustedLightBlock(1) + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + require.NoError(t, err) + + // confirm that the client already doesn't have the light block + _, err = c.TrustedLightBlock(2) + require.Error(t, err) + + // verify using bisection the light block between the two trusted light blocks + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) assert.NoError(t, err) - if assert.NotNil(t, l) { - // client take the trusted store and ignores the trusted options - assert.Equal(t, l.Hash(), l1.Hash()) - assert.NoError(t, l.ValidateBasic(chainID)) - } - mockNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) }) -} + t.Run("Cleanup", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) -func TestClient_Update(t *testing.T) { - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil) - mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - mockFullNode.On("LightBlock", 
mock.Anything, int64(3)).Return(l3, nil) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - - // should result in downloading & verifying header #3 - l, err := c.Update(ctx, bTime.Add(2*time.Hour)) - assert.NoError(t, err) - if assert.NotNil(t, l) { - assert.EqualValues(t, 3, l.Height) - assert.NoError(t, l.ValidateBasic(chainID)) - } - mockFullNode.AssertExpectations(t) -} + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockFullNode, + []provider.Provider{mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) + require.NoError(t, err) + _, err = c.TrustedLightBlock(1) + require.NoError(t, err) -func TestClient_Concurrency(t *testing.T) { - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil) - mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) + err = c.Cleanup() + require.NoError(t, err) - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) - require.NoError(t, err) + // Check no light blocks exist after Cleanup. 
+ l, err := c.TrustedLightBlock(1) + assert.Error(t, err) + assert.Nil(t, l) + mockFullNode.AssertExpectations(t) + }) + t.Run("RestoresTrustedHeaderAfterStartup", func(t *testing.T) { + // trustedHeader.Height == options.Height - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() - // NOTE: Cleanup, Stop, VerifyLightBlockAtHeight and Verify are not supposed - // to be concurrently safe. + // 1. options.Hash == trustedHeader.Hash + t.Run("hashes should match", func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() - assert.Equal(t, chainID, c.ChainID()) + logger := log.NewTestingLogger(t) - _, err := c.LastTrustedHeight() - assert.NoError(t, err) + mockNode := &provider_mocks.Provider{} + trustedStore := dbs.New(dbm.NewMemDB()) + err := trustedStore.SaveLightBlock(l1) + require.NoError(t, err) - _, err = c.FirstTrustedHeight() - assert.NoError(t, err) + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockNode, + []provider.Provider{mockNode}, + trustedStore, + light.Logger(logger), + ) + require.NoError(t, err) l, err := c.TrustedLightBlock(1) assert.NoError(t, err) assert.NotNil(t, l) - }() - } + assert.Equal(t, l.Hash(), h1.Hash()) + assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) + mockNode.AssertExpectations(t) + }) - wg.Wait() - mockFullNode.AssertExpectations(t) -} + // 2. 
options.Hash != trustedHeader.Hash + t.Run("hashes should not match", func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() -func TestClient_AddProviders(t *testing.T) { - mockFullNode := mockNodeFromHeadersAndVals(map[int64]*types.SignedHeader{ - 1: h1, - 2: h2, - }, valSet) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) + trustedStore := dbs.New(dbm.NewMemDB()) + err := trustedStore.SaveLightBlock(l1) + require.NoError(t, err) + + logger := log.NewTestingLogger(t) + + // header1 != h1 + header1 := keys.GenSignedHeader(t, chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) + mockNode := &provider_mocks.Provider{} + + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: header1.Hash(), + }, + mockNode, + []provider.Provider{mockNode}, + trustedStore, + light.Logger(logger), + ) + require.NoError(t, err) + + l, err := c.TrustedLightBlock(1) + assert.NoError(t, err) + if assert.NotNil(t, l) { + // client take the trusted store and ignores the trusted options + assert.Equal(t, l.Hash(), l1.Hash()) + assert.NoError(t, l.ValidateBasic(chainID)) + } + mockNode.AssertExpectations(t) + }) + }) + t.Run("Update", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil) + + logger := log.NewTestingLogger(t) + + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockFullNode, + []provider.Provider{mockFullNode}, + dbs.New(dbm.NewMemDB()), + 
light.Logger(logger), + ) + require.NoError(t, err) + + // should result in downloading & verifying header #3 + l, err := c.Update(ctx, bTime.Add(2*time.Hour)) + assert.NoError(t, err) + if assert.NotNil(t, l) { + assert.EqualValues(t, 3, l.Height) + assert.NoError(t, l.ValidateBasic(chainID)) + } + mockFullNode.AssertExpectations(t) + }) + + t.Run("Concurrency", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockFullNode, + []provider.Provider{mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) + require.NoError(t, err) - closeCh := make(chan struct{}) - go func() { - // run verification concurrently to make sure it doesn't dead lock _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) require.NoError(t, err) - close(closeCh) - }() - - // NOTE: the light client doesn't check uniqueness of providers - c.AddProvider(mockFullNode) - require.Len(t, c.Witnesses(), 2) - select { - case <-closeCh: - case <-time.After(5 * time.Second): - t.Fatal("concurent light block verification failed to finish in 5s") - } - mockFullNode.AssertExpectations(t) -} -func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) - - mockDeadNode := &provider_mocks.Provider{} - mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockDeadNode, - []provider.Provider{mockDeadNode, mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) + var wg 
sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() - require.NoError(t, err) - _, err = c.Update(ctx, bTime.Add(2*time.Hour)) - require.NoError(t, err) + // NOTE: Cleanup, Stop, VerifyLightBlockAtHeight and Verify are not supposed + // to be concurrently safe. - // the primary should no longer be the deadNode - assert.NotEqual(t, c.Primary(), mockDeadNode) + assert.Equal(t, chainID, c.ChainID()) - // we should still have the dead node as a witness because it - // hasn't repeatedly been unresponsive yet - assert.Equal(t, 2, len(c.Witnesses())) - mockDeadNode.AssertExpectations(t) - mockFullNode.AssertExpectations(t) -} + _, err := c.LastTrustedHeight() + assert.NoError(t, err) -func TestClientReplacesPrimaryWithWitnessIfPrimaryDoesntHaveBlock(t *testing.T) { - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) - - mockDeadNode := &provider_mocks.Provider{} - mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockDeadNode, - []provider.Provider{mockDeadNode, mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - _, err = c.Update(ctx, bTime.Add(2*time.Hour)) - require.NoError(t, err) - - // we should still have the dead node as a witness because it - // hasn't repeatedly been unresponsive yet - assert.Equal(t, 2, len(c.Witnesses())) - mockDeadNode.AssertExpectations(t) - mockFullNode.AssertExpectations(t) -} + _, err = c.FirstTrustedHeight() + assert.NoError(t, err) + + l, err := c.TrustedLightBlock(1) + assert.NoError(t, err) + assert.NotNil(t, l) + }() + } -func TestClient_BackwardsVerification(t *testing.T) { - { - headers, vals, _ := genLightBlocksWithKeys(chainID, 9, 3, 0, bTime) - delete(headers, 1) - delete(headers, 2) - delete(vals, 1) - delete(vals, 2) - mockLargeFullNode := 
mockNodeFromHeadersAndVals(headers, vals) - trustHeader, _ := mockLargeFullNode.LightBlock(ctx, 6) + wg.Wait() + mockFullNode.AssertExpectations(t) + }) + t.Run("AddProviders", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockFullNode := mockNodeFromHeadersAndVals(map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, + }, valSet) + logger := log.NewTestingLogger(t) c, err := light.NewClient( ctx, chainID, - light.TrustOptions{ - Period: 4 * time.Minute, - Height: trustHeader.Height, - Hash: trustHeader.Hash(), - }, - mockLargeFullNode, - []provider.Provider{mockLargeFullNode}, + trustOptions, + mockFullNode, + []provider.Provider{mockFullNode}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) - // 1) verify before the trusted header using backwards => expect no error - h, err := c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) - require.NoError(t, err) - if assert.NotNil(t, h) { - assert.EqualValues(t, 5, h.Height) + closeCh := make(chan struct{}) + go func() { + // run verification concurrently to make sure it doesn't dead lock + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) + require.NoError(t, err) + close(closeCh) + }() + + // NOTE: the light client doesn't check uniqueness of providers + c.AddProvider(mockFullNode) + require.Len(t, c.Witnesses(), 2) + select { + case <-closeCh: + case <-time.After(5 * time.Second): + t.Fatal("concurent light block verification failed to finish in 5s") } + mockFullNode.AssertExpectations(t) + }) + t.Run("ReplacesPrimaryWithWitnessIfPrimaryIsUnavailable", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // 2) untrusted header is expired but trusted header is not => expect no error - h, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(8*time.Minute)) - assert.NoError(t, err) - assert.NotNil(t, h) + mockFullNode := 
&provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) - // 3) already stored headers should return the header without error - h, err = c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) - assert.NoError(t, err) - assert.NotNil(t, h) + mockDeadNode := &provider_mocks.Provider{} + mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) + + logger := log.NewTestingLogger(t) + + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockDeadNode, + []provider.Provider{mockDeadNode, mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) - // 4a) First verify latest header - _, err = c.VerifyLightBlockAtHeight(ctx, 9, bTime.Add(9*time.Minute)) + require.NoError(t, err) + _, err = c.Update(ctx, bTime.Add(2*time.Hour)) require.NoError(t, err) - // 4b) Verify backwards using bisection => expect no error - _, err = c.VerifyLightBlockAtHeight(ctx, 7, bTime.Add(9*time.Minute)) - assert.NoError(t, err) - // shouldn't have verified this header in the process - _, err = c.TrustedLightBlock(8) - assert.Error(t, err) + // the primary should no longer be the deadNode + assert.NotEqual(t, c.Primary(), mockDeadNode) - // 5) Try bisection method, but closest header (at 7) has expired - // so expect error - _, err = c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute)) - assert.Error(t, err) - mockLargeFullNode.AssertExpectations(t) + // we should still have the dead node as a witness because it + // hasn't repeatedly been unresponsive yet + assert.Equal(t, 2, len(c.Witnesses())) + mockDeadNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) + }) + t.Run("ReplacesPrimaryWithWitnessIfPrimaryDoesntHaveBlock", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - } - { - // 8) provides incorrect hash - headers := map[int64]*types.SignedHeader{ - 2: keys.GenSignedHeader(chainID, 2, 
bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), - 3: h3, - } - vals := valSet - mockNode := mockNodeFromHeadersAndVals(headers, vals) + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) + + logger := log.NewTestingLogger(t) + + mockDeadNode := &provider_mocks.Provider{} + mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) c, err := light.NewClient( ctx, chainID, - light.TrustOptions{ - Period: 1 * time.Hour, - Height: 3, - Hash: h3.Hash(), - }, - mockNode, - []provider.Provider{mockNode}, + trustOptions, + mockDeadNode, + []provider.Provider{mockDeadNode, mockFullNode}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) + _, err = c.Update(ctx, bTime.Add(2*time.Hour)) + require.NoError(t, err) - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) - assert.Error(t, err) - mockNode.AssertExpectations(t) - } -} + // we should still have the dead node as a witness because it + // hasn't repeatedly been unresponsive yet + assert.Equal(t, 2, len(c.Witnesses())) + mockDeadNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) + }) + t.Run("BackwardsVerification", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) -func TestClient_NewClientFromTrustedStore(t *testing.T) { - // 1) Initiate DB and fill with a "trusted" header - db := dbs.New(dbm.NewMemDB()) - err := db.SaveLightBlock(l1) - require.NoError(t, err) - mockNode := &provider_mocks.Provider{} - - c, err := light.NewClientFromTrustedStore( - chainID, - trustPeriod, - mockNode, - []provider.Provider{mockNode}, - db, - ) - require.NoError(t, err) + { + headers, vals, _ := genLightBlocksWithKeys(t, chainID, 9, 3, 0, bTime) + 
delete(headers, 1) + delete(headers, 2) + delete(vals, 1) + delete(vals, 2) + mockLargeFullNode := mockNodeFromHeadersAndVals(headers, vals) + trustHeader, _ := mockLargeFullNode.LightBlock(ctx, 6) - // 2) Check light block exists - h, err := c.TrustedLightBlock(1) - assert.NoError(t, err) - assert.EqualValues(t, l1.Height, h.Height) - mockNode.AssertExpectations(t) -} + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Minute, + Height: trustHeader.Height, + Hash: trustHeader.Hash(), + }, + mockLargeFullNode, + []provider.Provider{mockLargeFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) + require.NoError(t, err) -func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { - // different headers hash then primary plus less than 1/3 signed (no fork) - headers1 := map[int64]*types.SignedHeader{ - 1: h1, - 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash2"), hash("cons_hash"), hash("results_hash"), - len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), - } - vals1 := map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - } - mockBadNode1 := mockNodeFromHeadersAndVals(headers1, vals1) - mockBadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + // 1) verify before the trusted header using backwards => expect no error + h, err := c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) + require.NoError(t, err) + if assert.NotNil(t, h) { + assert.EqualValues(t, 5, h.Height) + } - // header is empty - headers2 := map[int64]*types.SignedHeader{ - 1: h1, - 2: h2, - } - vals2 := map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - } - mockBadNode2 := mockNodeFromHeadersAndVals(headers2, vals2) - mockBadNode2.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - - mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) - - lb1, _ := 
mockBadNode1.LightBlock(ctx, 2) - require.NotEqual(t, lb1.Hash(), l1.Hash()) - - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockBadNode1, mockBadNode2}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - // witness should have behaved properly -> no error - require.NoError(t, err) - assert.EqualValues(t, 2, len(c.Witnesses())) - - // witness behaves incorrectly -> removed from list, no error - l, err := c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) - assert.NoError(t, err) - assert.EqualValues(t, 1, len(c.Witnesses())) - // light block should still be verified - assert.EqualValues(t, 2, l.Height) - - // remaining witnesses don't have light block -> error - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - if assert.Error(t, err) { - assert.Equal(t, light.ErrFailedHeaderCrossReferencing, err) - } - // witness does not have a light block -> left in the list - assert.EqualValues(t, 1, len(c.Witnesses())) - mockBadNode1.AssertExpectations(t) - mockBadNode2.AssertExpectations(t) -} + // 2) untrusted header is expired but trusted header is not => expect no error + h, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(8*time.Minute)) + assert.NoError(t, err) + assert.NotNil(t, h) + + // 3) already stored headers should return the header without error + h, err = c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) + assert.NoError(t, err) + assert.NotNil(t, h) + + // 4a) First verify latest header + _, err = c.VerifyLightBlockAtHeight(ctx, 9, bTime.Add(9*time.Minute)) + require.NoError(t, err) + + // 4b) Verify backwards using bisection => expect no error + _, err = c.VerifyLightBlockAtHeight(ctx, 7, bTime.Add(9*time.Minute)) + assert.NoError(t, err) + // shouldn't have verified this header in the process + _, err = c.TrustedLightBlock(8) + assert.Error(t, err) + + // 5) Try bisection method, but closest header (at 7) has expired + // so expect error + _, err 
= c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute)) + assert.Error(t, err) + mockLargeFullNode.AssertExpectations(t) + + } + { + // 8) provides incorrect hash + headers := map[int64]*types.SignedHeader{ + 2: keys.GenSignedHeader(t, chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), + 3: h3, + } + vals := valSet + mockNode := mockNodeFromHeadersAndVals(headers, vals) + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 1 * time.Hour, + Height: 3, + Hash: h3.Hash(), + }, + mockNode, + []provider.Provider{mockNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) + assert.Error(t, err) + mockNode.AssertExpectations(t) + } + }) + t.Run("NewClientFromTrustedStore", func(t *testing.T) { + // 1) Initiate DB and fill with a "trusted" header + db := dbs.New(dbm.NewMemDB()) + err := db.SaveLightBlock(l1) + require.NoError(t, err) + mockNode := &provider_mocks.Provider{} + + c, err := light.NewClientFromTrustedStore( + chainID, + trustPeriod, + mockNode, + []provider.Provider{mockNode}, + db, + ) + require.NoError(t, err) -func TestClient_TrustedValidatorSet(t *testing.T) { - differentVals, _ := factory.RandValidatorSet(10, 100) - mockBadValSetNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ + // 2) Check light block exists + h, err := c.TrustedLightBlock(1) + assert.NoError(t, err) + assert.EqualValues(t, l1.Height, h.Height) + mockNode.AssertExpectations(t) + }) + t.Run("RemovesWitnessIfItSendsUsIncorrectHeader", func(t *testing.T) { + logger := log.NewTestingLogger(t) + + // different headers hash then primary plus less than 1/3 signed (no fork) + headers1 := map[int64]*types.SignedHeader{ 1: h1, - // 3/3 signed, but validator set at height 2 below is invalid -> witness - // should be removed. 
- 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + 2: keys.GenSignedHeaderLastBlockID(t, chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, hash("app_hash2"), hash("cons_hash"), hash("results_hash"), - 0, len(keys), types.BlockID{Hash: h1.Hash()}), - }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: differentVals, - }) - mockFullNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ - 1: h1, - 2: h2, - }, - map[int64]*types.ValidatorSet{ + len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), + } + vals1 := map[int64]*types.ValidatorSet{ 1: vals, 2: vals, - }) - - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockBadValSetNode, mockFullNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - assert.Equal(t, 2, len(c.Witnesses())) - - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second)) - assert.NoError(t, err) - assert.Equal(t, 1, len(c.Witnesses())) - mockBadValSetNode.AssertExpectations(t) - mockFullNode.AssertExpectations(t) -} + } + mockBadNode1 := mockNodeFromHeadersAndVals(headers1, vals1) + mockBadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) -func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { - mockFullNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ + // header is empty + headers2 := map[int64]*types.SignedHeader{ 1: h1, - 3: h3, - 0: h3, - }, - map[int64]*types.ValidatorSet{ + 2: h2, + } + vals2 := map[int64]*types.ValidatorSet{ 1: vals, - 3: vals, - 0: vals, - }) + 2: vals, + } + mockBadNode2 := mockNodeFromHeadersAndVals(headers2, vals2) + mockBadNode2.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockFullNode, - []provider.Provider{mockFullNode}, - 
dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - light.PruningSize(1), - ) - require.NoError(t, err) - _, err = c.TrustedLightBlock(1) - require.NoError(t, err) + mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) - h, err := c.Update(ctx, bTime.Add(2*time.Hour)) - require.NoError(t, err) - require.Equal(t, int64(3), h.Height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, err = c.TrustedLightBlock(1) - assert.Error(t, err) - mockFullNode.AssertExpectations(t) -} + lb1, _ := mockBadNode1.LightBlock(ctx, 2) + require.NotEqual(t, lb1.Hash(), l1.Hash()) -func TestClientEnsureValidHeadersAndValSets(t *testing.T) { - emptyValSet := &types.ValidatorSet{ - Validators: nil, - Proposer: nil, - } + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockFullNode, + []provider.Provider{mockBadNode1, mockBadNode2}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) + // witness should have behaved properly -> no error + require.NoError(t, err) + assert.EqualValues(t, 2, len(c.Witnesses())) - testCases := []struct { - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet + // witness behaves incorrectly -> removed from list, no error + l, err := c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) + assert.NoError(t, err) + assert.EqualValues(t, 1, len(c.Witnesses())) + // light block should still be verified + assert.EqualValues(t, 2, l.Height) + + // remaining witnesses don't have light block -> error + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + if assert.Error(t, err) { + assert.Equal(t, light.ErrFailedHeaderCrossReferencing, err) + } + // witness does not have a light block -> left in the list + assert.EqualValues(t, 1, len(c.Witnesses())) + mockBadNode1.AssertExpectations(t) + mockBadNode2.AssertExpectations(t) + }) + t.Run("TrustedValidatorSet", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - 
errorToThrow error - errorHeight int64 + logger := log.NewTestingLogger(t) - err bool - }{ - { - headers: map[int64]*types.SignedHeader{ + differentVals, _ := factory.ValidatorSet(ctx, t, 10, 100) + mockBadValSetNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 1: h1, - 3: h3, + // 3/3 signed, but validator set at height 2 below is invalid -> witness + // should be removed. + 2: keys.GenSignedHeaderLastBlockID(t, chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash2"), hash("cons_hash"), hash("results_hash"), + 0, len(keys), types.BlockID{Hash: h1.Hash()}), }, - vals: map[int64]*types.ValidatorSet{ + map[int64]*types.ValidatorSet{ 1: vals, - 3: vals, - }, - err: false, - }, - { - headers: map[int64]*types.SignedHeader{ + 2: differentVals, + }) + mockFullNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 1: h1, + 2: h2, }, - vals: map[int64]*types.ValidatorSet{ + map[int64]*types.ValidatorSet{ 1: vals, - }, - errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, - errorHeight: 3, - err: true, - }, - { - headers: map[int64]*types.SignedHeader{ - 1: h1, - }, - errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, - errorHeight: 3, - vals: valSet, - err: true, - }, - { - headers: map[int64]*types.SignedHeader{ + 2: vals, + }) + + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockFullNode, + []provider.Provider{mockBadValSetNode, mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + ) + require.NoError(t, err) + assert.Equal(t, 2, len(c.Witnesses())) + + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second)) + assert.NoError(t, err) + assert.Equal(t, 1, len(c.Witnesses())) + mockBadValSetNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) + }) + t.Run("PrunesHeadersAndValidatorSets", func(t *testing.T) { + mockFullNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 
1: h1, 3: h3, + 0: h3, }, - vals: map[int64]*types.ValidatorSet{ + map[int64]*types.ValidatorSet{ 1: vals, - 3: emptyValSet, - }, - err: true, - }, - } - - for i, tc := range testCases { - testCase := tc - t.Run(fmt.Sprintf("case: %d", i), func(t *testing.T) { - mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals) - if testCase.errorToThrow != nil { - mockBadNode.On("LightBlock", mock.Anything, testCase.errorHeight).Return(nil, testCase.errorToThrow) - } + 3: vals, + 0: vals, + }) - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockBadNode, - []provider.Provider{mockBadNode, mockBadNode}, - dbs.New(dbm.NewMemDB()), - ) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - if testCase.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - mockBadNode.AssertExpectations(t) - }) - } + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockFullNode, + []provider.Provider{mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(logger), + light.PruningSize(1), + ) + require.NoError(t, err) + _, err = c.TrustedLightBlock(1) + require.NoError(t, err) -} + h, err := c.Update(ctx, bTime.Add(2*time.Hour)) + require.NoError(t, err) + require.Equal(t, int64(3), h.Height) -func TestClientHandlesContexts(t *testing.T) { - mockNode := &provider_mocks.Provider{} - mockNode.On("LightBlock", - mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == nil }), - int64(1)).Return(l1, nil) - mockNode.On("LightBlock", - mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.DeadlineExceeded }), - mock.Anything).Return(nil, context.DeadlineExceeded) - - mockNode.On("LightBlock", - mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.Canceled }), - mock.Anything).Return(nil, context.Canceled) - - // instantiate the 
light client with a timeout - ctxTimeOut, cancel := context.WithTimeout(ctx, 1*time.Nanosecond) - defer cancel() - _, err := light.NewClient( - ctxTimeOut, - chainID, - trustOptions, - mockNode, - []provider.Provider{mockNode, mockNode}, - dbs.New(dbm.NewMemDB()), - ) - require.Error(t, ctxTimeOut.Err()) - require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) - - // instantiate the client for real - c, err := light.NewClient( - ctx, - chainID, - trustOptions, - mockNode, - []provider.Provider{mockNode, mockNode}, - dbs.New(dbm.NewMemDB()), - ) - require.NoError(t, err) + _, err = c.TrustedLightBlock(1) + assert.Error(t, err) + mockFullNode.AssertExpectations(t) + }) + t.Run("EnsureValidHeadersAndValSets", func(t *testing.T) { + emptyValSet := &types.ValidatorSet{ + Validators: nil, + Proposer: nil, + } - // verify a block with a timeout - ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 1*time.Nanosecond) - defer cancel() - _, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute)) - require.Error(t, ctxTimeOutBlock.Err()) - require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) - - // verify a block with a cancel - ctxCancel, cancel := context.WithCancel(ctx) - cancel() - _, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute)) - require.Error(t, ctxCancel.Err()) - require.Error(t, err) - require.True(t, errors.Is(err, context.Canceled)) - mockNode.AssertExpectations(t) + testCases := []struct { + headers map[int64]*types.SignedHeader + vals map[int64]*types.ValidatorSet + + errorToThrow error + errorHeight int64 + + err bool + }{ + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + 3: h3, + }, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + 3: vals, + }, + err: false, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + }, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + }, + errorToThrow: provider.ErrBadLightBlock{Reason: 
errors.New("nil header or vals")}, + errorHeight: 3, + err: true, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + }, + errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, + errorHeight: 3, + vals: valSet, + err: true, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + 3: h3, + }, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + 3: emptyValSet, + }, + err: true, + }, + } + for i, tc := range testCases { + testCase := tc + t.Run(fmt.Sprintf("case: %d", i), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals) + if testCase.errorToThrow != nil { + mockBadNode.On("LightBlock", mock.Anything, testCase.errorHeight).Return(nil, testCase.errorToThrow) + } + + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockBadNode, + []provider.Provider{mockBadNode, mockBadNode}, + dbs.New(dbm.NewMemDB()), + ) + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + if testCase.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + mockBadNode.AssertExpectations(t) + }) + } + }) } diff --git a/light/detector.go b/light/detector.go index ddb0bc4ed4..0e5303acba 100644 --- a/light/detector.go +++ b/light/detector.go @@ -257,7 +257,7 @@ func (c *Client) handleConflictingHeaders( now, ) if err != nil { - c.logger.Info("Error validating primary's divergent header", "primary", c.primary, "err", err) + c.logger.Info("error validating primary's divergent header", "primary", c.primary, "err", err) return ErrLightClientAttack } diff --git a/light/detector_test.go b/light/detector_test.go index 0bf96ace69..84b6f210c1 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -2,6 +2,7 @@ package light_test import ( "bytes" + "context" "testing" "time" @@ -20,6 +21,8 @@ import ( ) func TestLightClientAttackEvidence_Lunatic(t *testing.T) { + 
logger := log.NewTestingLogger(t) + // primary performs a lunatic attack var ( latestHeight = int64(3) @@ -29,7 +32,10 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) ) - witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(t, chainID, latestHeight, valSize, 2, bTime) forgedKeys := chainKeys[divergenceHeight-1].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain) forgedVals := forgedKeys.ToValidators(2, 0) @@ -40,7 +46,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { primaryValidators[height] = witnessValidators[height] continue } - primaryHeaders[height] = forgedKeys.GenSignedHeader(chainID, height, bTime.Add(time.Duration(height)*time.Minute), + primaryHeaders[height] = forgedKeys.GenSignedHeader(t, chainID, height, bTime.Add(time.Duration(height)*time.Minute), nil, forgedVals, forgedVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(forgedKeys)) primaryValidators[height] = forgedVals } @@ -88,7 +94,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { mockPrimary, []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -128,9 +134,17 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { }, } + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for _, tc := range cases { testCase := tc t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + + logger := log.NewTestingLogger(t) + // primary performs an equivocation attack var ( valSize = 5 @@ -138,7 +152,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { // validators 
don't change in this network (however we still use a map just for convenience) primaryValidators = make(map[int64]*types.ValidatorSet, testCase.latestHeight) ) - witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, + witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(t, chainID, testCase.latestHeight+1, valSize, 2, bTime) for height := int64(1); height <= testCase.latestHeight; height++ { if height < testCase.divergenceHeight { @@ -148,7 +162,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { } // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for // a different block (which we do by adding txs) - primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, + primaryHeaders[height] = chainKeys[height].GenSignedHeader(t, chainID, height, bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, witnessValidators[height], witnessValidators[height+1], hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) @@ -199,7 +213,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { mockPrimary, []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), testCase.lightOption, ) require.NoError(t, err) @@ -228,7 +242,11 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { primaryValidators = make(map[int64]*types.ValidatorSet, forgedHeight) ) - witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(t, chainID, latestHeight, valSize, 2, bTime) for _, unusedHeader := range []int64{3, 5, 6, 8} { delete(witnessHeaders, unusedHeader) } @@ -244,7 +262,7 @@ func 
TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { } forgedKeys := chainKeys[latestHeight].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain) primaryValidators[forgedHeight] = forgedKeys.ToValidators(2, 0) - primaryHeaders[forgedHeight] = forgedKeys.GenSignedHeader( + primaryHeaders[forgedHeight] = forgedKeys.GenSignedHeader(t, chainID, forgedHeight, bTime.Add(time.Duration(latestHeight+1)*time.Minute), // 11 mins @@ -298,7 +316,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { mockPrimary, []provider.Provider{mockWitness, accomplice}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), light.MaxClockDrift(1*time.Second), light.MaxBlockLag(1*time.Second), ) @@ -308,7 +326,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { // to prove that there was an attack vals := chainKeys[latestHeight].ToValidators(2, 0) newLb := &types.LightBlock{ - SignedHeader: chainKeys[latestHeight].GenSignedHeader( + SignedHeader: chainKeys[latestHeight].GenSignedHeader(t, chainID, proofHeight, bTime.Add(time.Duration(proofHeight+1)*time.Minute), // 12 mins @@ -358,7 +376,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { mockPrimary, []provider.Provider{mockLaggingWitness, accomplice}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), light.MaxClockDrift(1*time.Second), light.MaxBlockLag(1*time.Second), ) @@ -374,13 +392,18 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { // => light client returns an error upon creation because primary and witness // have a different view. 
func TestClientDivergentTraces1(t *testing.T) { - headers, vals, _ := genLightBlocksWithKeys(chainID, 1, 5, 2, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + headers, vals, _ := genLightBlocksWithKeys(t, chainID, 1, 5, 2, bTime) mockPrimary := mockNodeFromHeadersAndVals(headers, vals) firstBlock, err := mockPrimary.LightBlock(ctx, 1) require.NoError(t, err) - headers, vals, _ = genLightBlocksWithKeys(chainID, 1, 5, 2, bTime) + headers, vals, _ = genLightBlocksWithKeys(t, chainID, 1, 5, 2, bTime) mockWitness := mockNodeFromHeadersAndVals(headers, vals) + logger := log.NewTestingLogger(t) + _, err = light.NewClient( ctx, chainID, @@ -392,7 +415,7 @@ func TestClientDivergentTraces1(t *testing.T) { mockPrimary, []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.Error(t, err) assert.Contains(t, err.Error(), "does not match primary") @@ -403,7 +426,11 @@ func TestClientDivergentTraces1(t *testing.T) { // 2. 
Two out of three nodes don't respond but the third has a header that matches // => verification should be successful and all the witnesses should remain func TestClientDivergentTraces2(t *testing.T) { - headers, vals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + headers, vals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime) mockPrimaryNode := mockNodeFromHeadersAndVals(headers, vals) mockDeadNode := &provider_mocks.Provider{} mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) @@ -420,7 +447,7 @@ func TestClientDivergentTraces2(t *testing.T) { mockPrimaryNode, []provider.Provider{mockDeadNode, mockDeadNode, mockPrimaryNode}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -435,14 +462,19 @@ func TestClientDivergentTraces2(t *testing.T) { // => creation should succeed, but the verification should fail //nolint: dupl func TestClientDivergentTraces3(t *testing.T) { + logger := log.NewTestingLogger(t) + // - primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime) mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + firstBlock, err := mockPrimary.LightBlock(ctx, 1) require.NoError(t, err) - mockHeaders, mockVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + mockHeaders, mockVals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime) mockHeaders[1] = primaryHeaders[1] mockVals[1] = primaryVals[1] mockWitness := mockNodeFromHeadersAndVals(mockHeaders, mockVals) @@ -458,7 +490,7 @@ func TestClientDivergentTraces3(t *testing.T) { mockPrimary, []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), - 
light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -473,14 +505,19 @@ func TestClientDivergentTraces3(t *testing.T) { // It should be ignored //nolint: dupl func TestClientDivergentTraces4(t *testing.T) { + logger := log.NewTestingLogger(t) + // - primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + primaryHeaders, primaryVals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime) mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + firstBlock, err := mockPrimary.LightBlock(ctx, 1) require.NoError(t, err) - witnessHeaders, witnessVals, _ := genLightBlocksWithKeys(chainID, 2, 5, 2, bTime) + witnessHeaders, witnessVals, _ := genLightBlocksWithKeys(t, chainID, 2, 5, 2, bTime) primaryHeaders[2] = witnessHeaders[2] primaryVals[2] = witnessVals[2] mockWitness := mockNodeFromHeadersAndVals(primaryHeaders, primaryVals) @@ -496,7 +533,7 @@ func TestClientDivergentTraces4(t *testing.T) { mockPrimary, []provider.Provider{mockWitness}, dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) diff --git a/light/example_test.go b/light/example_test.go index 2e0feb5e15..d22525b309 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "io/ioutil" stdlog "log" "os" "time" @@ -22,8 +21,15 @@ import ( func ExampleClient() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") - logger := log.TestingLogger() + conf, err := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") + if err != nil { + stdlog.Fatal(err) + } + + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + stdlog.Fatal(err) + } // Start a test application app := kvstore.NewApplication() @@ -34,7 
+40,7 @@ func ExampleClient() { } defer func() { _ = closer(ctx) }() - dbDir, err := ioutil.TempDir("", "light-client-example") + dbDir, err := os.MkdirTemp("", "light-client-example") if err != nil { stdlog.Fatal(err) } @@ -85,7 +91,7 @@ func ExampleClient() { time.Sleep(2 * time.Second) // veify the block at height 3 - _, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now()) + _, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now()) if err != nil { stdlog.Fatal(err) } diff --git a/light/helpers_test.go b/light/helpers_test.go index 1d25f9166a..9f61475262 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -1,9 +1,11 @@ package light_test import ( + "testing" "time" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" @@ -74,7 +76,9 @@ func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { } // signHeader properly signs the header with all keys from first to last exclusive. -func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, first, last int) *types.Commit { +func (pkz privKeys) signHeader(t testing.TB, header *types.Header, valSet *types.ValidatorSet, first, last int) *types.Commit { + t.Helper() + commitSigs := make([]types.CommitSig, len(pkz)) for i := 0; i < len(pkz); i++ { commitSigs[i] = types.NewCommitSigAbsent() @@ -87,15 +91,15 @@ func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, // Fill in the votes we want. 
for i := first; i < last && i < len(pkz); i++ { - vote := makeVote(header, valSet, pkz[i], blockID) + vote := makeVote(t, header, valSet, pkz[i], blockID) commitSigs[vote.ValidatorIndex] = vote.CommitSig() } return types.NewCommit(header.Height, 1, blockID, commitSigs) } -func makeVote(header *types.Header, valset *types.ValidatorSet, - key crypto.PrivKey, blockID types.BlockID) *types.Vote { +func makeVote(t testing.TB, header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey, blockID types.BlockID) *types.Vote { + t.Helper() addr := key.PubKey().Address() idx, _ := valset.GetByAddress(addr) @@ -113,9 +117,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, // Sign it signBytes := types.VoteSignBytes(header.ChainID, v) sig, err := key.Sign(signBytes) - if err != nil { - panic(err) - } + require.NoError(t, err) vote.Signature = sig @@ -143,26 +145,30 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. -func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs, +func (pkz privKeys) GenSignedHeader(t testing.TB, chainID string, height int64, bTime time.Time, txs types.Txs, valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { + t.Helper() + header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) return &types.SignedHeader{ Header: header, - Commit: pkz.signHeader(header, valset, first, last), + Commit: pkz.signHeader(t, header, valset, first, last), } } // GenSignedHeaderLastBlockID calls genHeader and signHeader and combines them into a SignedHeader. 
-func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs types.Txs, +func (pkz privKeys) GenSignedHeaderLastBlockID(t testing.TB, chainID string, height int64, bTime time.Time, txs types.Txs, valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, lastBlockID types.BlockID) *types.SignedHeader { + t.Helper() + header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) header.LastBlockID = lastBlockID return &types.SignedHeader{ Header: header, - Commit: pkz.signHeader(header, valset, first, last), + Commit: pkz.signHeader(t, header, valset, first, last), } } @@ -175,14 +181,14 @@ func (pkz privKeys) ChangeKeys(delta int) privKeys { // blocks to height. BlockIntervals are in per minute. // NOTE: Expected to have a large validator set size ~ 100 validators. func genLightBlocksWithKeys( + t testing.TB, chainID string, numBlocks int64, valSize int, valVariation float32, - bTime time.Time) ( - map[int64]*types.SignedHeader, - map[int64]*types.ValidatorSet, - map[int64]privKeys) { + bTime time.Time, +) (map[int64]*types.SignedHeader, map[int64]*types.ValidatorSet, map[int64]privKeys) { + t.Helper() var ( headers = make(map[int64]*types.SignedHeader, numBlocks) @@ -201,7 +207,7 @@ func genLightBlocksWithKeys( keymap[2] = newKeys // genesis header and vals - lastHeader := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Minute), nil, + lastHeader := keys.GenSignedHeader(t, chainID, 1, bTime.Add(1*time.Minute), nil, keys.ToValidators(2, 0), newKeys.ToValidators(2, 0), hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) currentHeader := lastHeader @@ -214,7 +220,7 @@ func genLightBlocksWithKeys( valVariationInt = int(totalVariation) totalVariation = -float32(valVariationInt) newKeys = keys.ChangeKeys(valVariationInt) - currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute), + 
currentHeader = keys.GenSignedHeaderLastBlockID(t, chainID, height, bTime.Add(time.Duration(height)*time.Minute), nil, keys.ToValidators(2, 0), newKeys.ToValidators(2, 0), hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) diff --git a/light/light_test.go b/light/light_test.go index f5d2ddd89f..00d0741ce9 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -2,7 +2,6 @@ package light_test import ( "context" - "io/ioutil" "os" "testing" "time" @@ -29,7 +28,10 @@ func TestClientIntegration_Update(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) + + logger := log.NewTestingLogger(t) // Start a test application app := kvstore.NewApplication() @@ -40,7 +42,7 @@ func TestClientIntegration_Update(t *testing.T) { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-test-update-example") + dbDir, err := os.MkdirTemp("", "light-client-test-update-example") require.NoError(t, err) defer os.RemoveAll(dbDir) @@ -67,7 +69,7 @@ func TestClientIntegration_Update(t *testing.T) { primary, []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -89,7 +91,10 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) + + logger := log.NewTestingLogger(t) // Start a test application app := kvstore.NewApplication() @@ -98,7 +103,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - 
dbDir, err := ioutil.TempDir("", "light-client-test-verify-example") + dbDir, err := os.MkdirTemp("", "light-client-test-verify-example") require.NoError(t, err) defer os.RemoveAll(dbDir) @@ -124,7 +129,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { primary, []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -162,3 +167,93 @@ func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*type } } } + +func TestClientStatusRPC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) + + // Start a test application + app := kvstore.NewApplication() + + _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) + require.NoError(t, err) + defer func() { require.NoError(t, closer(ctx)) }() + + dbDir, err := os.MkdirTemp("", "light-client-test-status-example") + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(dbDir) }) + + chainID := conf.ChainID() + + primary, err := httpp.New(chainID, conf.RPC.ListenAddress) + require.NoError(t, err) + + // give Tendermint time to generate some blocks + block, err := waitForBlock(ctx, primary, 2) + require.NoError(t, err) + + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + require.NoError(t, err) + + // In order to not create a full testnet to verify whether we get the correct IPs + // if we have more than one witness, we add the primary multiple times + // TODO This should be buggy behavior, we should not be allowed to add the same nodes as witnesses + witnesses := []provider.Provider{primary, primary, primary} + + c, err := light.NewClient(ctx, + chainID, + light.TrustOptions{ + Period: 504 * time.Hour, // 21 days + Height: 2, + Hash: block.Hash(), + }, + primary, + witnesses, + dbs.New(db), + light.Logger(log.TestingLogger()), + 
) + require.NoError(t, err) + + defer func() { require.NoError(t, c.Cleanup()) }() + + lightStatus := c.Status(ctx) + + // Verify primary IP + require.True(t, lightStatus.PrimaryID == primary.ID()) + + // Verify IPs of witnesses + require.ElementsMatch(t, mapProviderArrayToIP(witnesses), lightStatus.WitnessesID) + + // Verify that number of peers is equal to number of witnesses (+ 1 if the primary is not a witness) + require.Equal(t, len(witnesses)+1*primaryNotInWitnessList(witnesses, primary), lightStatus.NumPeers) + + // Verify that the last trusted hash returned matches the stored hash of the trusted + // block at the last trusted height. + blockAtTrustedHeight, err := c.TrustedLightBlock(lightStatus.LastTrustedHeight) + require.NoError(t, err) + + require.EqualValues(t, lightStatus.LastTrustedHash, blockAtTrustedHeight.Hash()) + +} + +// Extract the IP address of all the providers within an array +func mapProviderArrayToIP(el []provider.Provider) []string { + ips := make([]string, len(el)) + for i, v := range el { + ips[i] = v.ID() + } + return ips +} + +// If the primary is not in the witness list, we will return 1 +// Otherwise, return 0 +func primaryNotInWitnessList(witnesses []provider.Provider, primary provider.Provider) int { + for _, el := range witnesses { + if el == primary { + return 0 + } + } + return 1 +} diff --git a/light/mbt/driver_test.go b/light/mbt/driver_test.go index bf6ab3d435..55a05a6070 100644 --- a/light/mbt/driver_test.go +++ b/light/mbt/driver_test.go @@ -1,14 +1,14 @@ package mbt import ( - "io/ioutil" + "encoding/json" + "os" "path/filepath" "testing" "time" "github.com/stretchr/testify/require" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/types" ) @@ -22,13 +22,13 @@ func TestVerify(t *testing.T) { filename := filename t.Run(filename, func(t *testing.T) { - jsonBlob, err := ioutil.ReadFile(filename) + jsonBlob, err := os.ReadFile(filename) if err != 
nil { t.Fatal(err) } var tc testCase - err = tmjson.Unmarshal(jsonBlob, &tc) + err = json.Unmarshal(jsonBlob, &tc) if err != nil { t.Fatal(err) } @@ -103,7 +103,7 @@ type testCase struct { type initialData struct { SignedHeader types.SignedHeader `json:"signed_header"` NextValidatorSet types.ValidatorSet `json:"next_validator_set"` - TrustingPeriod uint64 `json:"trusting_period"` + TrustingPeriod uint64 `json:"trusting_period,string"` Now time.Time `json:"now"` } diff --git a/light/provider/errors.go b/light/provider/errors.go index 355ec34753..d1a39f0c09 100644 --- a/light/provider/errors.go +++ b/light/provider/errors.go @@ -28,16 +28,20 @@ type ErrBadLightBlock struct { } func (e ErrBadLightBlock) Error() string { - return fmt.Sprintf("client provided bad signed header: %s", e.Reason.Error()) + return fmt.Sprintf("client provided bad signed header: %v", e.Reason) } +func (e ErrBadLightBlock) Unwrap() error { return e.Reason } + // ErrUnreliableProvider is a generic error that indicates that the provider isn't // behaving in a reliable manner to the light client. 
The light client will // remove the provider type ErrUnreliableProvider struct { - Reason string + Reason error } func (e ErrUnreliableProvider) Error() string { - return fmt.Sprintf("client deemed unreliable: %s", e.Reason) + return fmt.Sprintf("client deemed unreliable: %v", e.Reason) } + +func (e ErrUnreliableProvider) Unwrap() error { return e.Reason } diff --git a/light/provider/http/http.go b/light/provider/http/http.go index f8bf7d29ea..cf443e1b52 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -100,7 +100,8 @@ func NewWithClientAndOptions(chainID string, client rpcclient.RemoteClient, opti } } -func (p *http) String() string { +// Identifies the provider with an IP in string format +func (p *http) ID() string { return fmt.Sprintf("http{%s}", p.client.Remote()) } @@ -212,7 +213,7 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato // If we don't know the error then by default we return an unreliable provider error and // terminate the connection with the peer. - return nil, provider.ErrUnreliableProvider{Reason: e.Error()} + return nil, provider.ErrUnreliableProvider{Reason: e} } // update the total and increment the page index so we can fetch the @@ -268,7 +269,7 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe // If we don't know the error then by default we return an unreliable provider error and // terminate the connection with the peer. 
- return nil, provider.ErrUnreliableProvider{Reason: e.Error()} + return nil, provider.ErrUnreliableProvider{Reason: e} } } return nil, p.noResponse() @@ -278,7 +279,7 @@ func (p *http) noResponse() error { p.noResponseCount++ if p.noResponseCount > p.noResponseThreshold { return provider.ErrUnreliableProvider{ - Reason: fmt.Sprintf("failed to respond after %d attempts", p.noResponseCount), + Reason: fmt.Errorf("failed to respond after %d attempts", p.noResponseCount), } } return provider.ErrNoResponse @@ -288,7 +289,7 @@ func (p *http) noBlock(e error) error { p.noBlockCount++ if p.noBlockCount > p.noBlockThreshold { return provider.ErrUnreliableProvider{ - Reason: fmt.Sprintf("failed to provide a block after %d attempts", p.noBlockCount), + Reason: fmt.Errorf("failed to provide a block after %d attempts", p.noBlockCount), } } return e diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index adcb69fb9f..4c7761d501 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -2,7 +2,7 @@ package http_test import ( "context" - "fmt" + "errors" "testing" "time" @@ -21,21 +21,22 @@ import ( func TestNewProvider(t *testing.T) { c, err := lighthttp.New("chain-test", "192.168.0.1:26657") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://192.168.0.1:26657}") + require.Equal(t, c.ID(), "http{http://192.168.0.1:26657}") c, err = lighthttp.New("chain-test", "http://153.200.0.1:26657") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1:26657}") + require.Equal(t, c.ID(), "http{http://153.200.0.1:26657}") c, err = lighthttp.New("chain-test", "153.200.0.1") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}") + require.Equal(t, c.ID(), "http{http://153.200.0.1}") } func TestProvider(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := rpctest.CreateConfig(t.Name()) + cfg, 
err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // start a tendermint node in the background to test against app := kvstore.NewApplication() @@ -51,18 +52,18 @@ func TestProvider(t *testing.T) { t.Log("chainID:", chainID) c, err := rpchttp.New(rpcAddr) - require.Nil(t, err) + require.NoError(t, err) p := lighthttp.NewWithClient(chainID, c) require.NoError(t, err) require.NotNil(t, p) // let it produce some blocks - err = rpcclient.WaitForHeight(c, 10, nil) + err = rpcclient.WaitForHeight(ctx, c, 10, nil) require.NoError(t, err) // let's get the highest block - lb, err := p.LightBlock(context.Background(), 0) + lb, err := p.LightBlock(ctx, 0) require.NoError(t, err) assert.True(t, lb.Height < 9001, "height=%d", lb.Height) @@ -71,25 +72,25 @@ func TestProvider(t *testing.T) { // historical queries now work :) lower := lb.Height - 3 - lb, err = p.LightBlock(context.Background(), lower) + lb, err = p.LightBlock(ctx, lower) require.NoError(t, err) assert.Equal(t, lower, lb.Height) // fetching missing heights (both future and pruned) should return appropriate errors - lb, err = p.LightBlock(context.Background(), 9001) + lb, err = p.LightBlock(ctx, 9001) require.Error(t, err) require.Nil(t, lb) - assert.Equal(t, provider.ErrHeightTooHigh, err) + assert.ErrorIs(t, err, provider.ErrHeightTooHigh) - lb, err = p.LightBlock(context.Background(), 1) + lb, err = p.LightBlock(ctx, 1) require.Error(t, err) require.Nil(t, lb) - assert.Equal(t, provider.ErrLightBlockNotFound, err) + assert.ErrorIs(t, err, provider.ErrLightBlockNotFound) // if the provider is unable to provide four more blocks then we should return // an unreliable peer error for i := 0; i < 4; i++ { - _, err = p.LightBlock(context.Background(), 1) + _, err = p.LightBlock(ctx, 1) } assert.IsType(t, provider.ErrUnreliableProvider{}, err) @@ -98,9 +99,11 @@ func TestProvider(t *testing.T) { cancel() time.Sleep(10 * time.Second) - lb, err = p.LightBlock(context.Background(), lower+2) - // we should see a 
connection refused + lb, err = p.LightBlock(ctx, lower+2) + // Either the connection should be refused, or the context canceled. require.Error(t, err) require.Nil(t, lb) - assert.Equal(t, provider.ErrConnectionClosed, err) + if !errors.Is(err, provider.ErrConnectionClosed) && !errors.Is(err, context.Canceled) { + assert.Fail(t, "Incorrect error", "wanted connection closed or context canceled, got %v", err) + } } diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index aa36fa2d34..1b4e583de8 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -15,6 +15,20 @@ type Provider struct { mock.Mock } +// ID provides a mock function with given fields: +func (_m *Provider) ID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + // LightBlock provides a mock function with given fields: ctx, height func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { ret := _m.Called(ctx, height) diff --git a/light/provider/provider.go b/light/provider/provider.go index 7f15d5c752..d1b3304daa 100644 --- a/light/provider/provider.go +++ b/light/provider/provider.go @@ -25,4 +25,8 @@ type Provider interface { // ReportEvidence reports an evidence of misbehavior. ReportEvidence(context.Context, types.Evidence) error + + // Returns the ID of a provider. 
For RPC providers it returns the IP address of the client + // For p2p providers it returns a combination of NodeID and IP address + ID() string } diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 6f26225887..6e7a5ff2a6 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -6,8 +6,9 @@ import ( "net" "net/http" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/light" lrpc "github.com/tendermint/tendermint/light/rpc" rpchttp "github.com/tendermint/tendermint/rpc/client/http" @@ -40,7 +41,7 @@ func NewProxy( return &Proxy{ Addr: listenAddr, Config: config, - Client: lrpc.NewClient(rpcClient, lightClient, opts...), + Client: lrpc.NewClient(logger, rpcClient, lightClient, opts...), Logger: logger, }, nil } @@ -49,14 +50,15 @@ func NewProxy( // routes to proxy via Client, and starts up an HTTP server on the TCP network // address p.Addr. // See http#Server#ListenAndServe. -func (p *Proxy) ListenAndServe() error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServe(ctx context.Context) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } p.Listener = listener return rpcserver.Serve( + ctx, listener, mux, p.Logger, @@ -67,14 +69,15 @@ func (p *Proxy) ListenAndServe() error { // ListenAndServeTLS acts identically to ListenAndServe, except that it expects // HTTPS connections. // See http#Server#ListenAndServeTLS. 
-func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } p.Listener = listener return rpcserver.ServeTLS( + ctx, listener, mux, certFile, @@ -84,16 +87,16 @@ func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { ) } -func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { +func (p *Proxy) listen(ctx context.Context) (net.Listener, *http.ServeMux, error) { mux := http.NewServeMux() // 1) Register regular routes. - r := RPCRoutes(p.Client) + r := rpccore.NewRoutesMap(proxyService{Client: p.Client}, nil) rpcserver.RegisterRPCFuncs(mux, r, p.Logger) // 2) Allow websocket connections. wmLogger := p.Logger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(r, + wm := rpcserver.NewWebsocketManager(wmLogger, r, rpcserver.OnDisconnect(func(remoteAddr string) { err := p.Client.UnsubscribeAll(context.Background(), remoteAddr) if err != nil && err != tmpubsub.ErrSubscriptionNotFound { @@ -102,12 +105,12 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { }), rpcserver.ReadLimit(p.Config.MaxBodyBytes), ) - wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) // 3) Start a client. 
if !p.Client.IsRunning() { - if err := p.Client.Start(); err != nil { + if err := p.Client.Start(ctx); err != nil { return nil, mux, fmt.Errorf("can't start client: %w", err) } } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 436ae1b765..24e7e18c20 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -1,286 +1,44 @@ package proxy import ( - "github.com/tendermint/tendermint/libs/bytes" + "context" + + tmbytes "github.com/tendermint/tendermint/libs/bytes" lrpc "github.com/tendermint/tendermint/light/rpc" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" - rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) -func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { - return map[string]*rpcserver.RPCFunc{ - // Subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpcserver.NewWSRPCFunc(c.SubscribeWS, "query"), - "unsubscribe": rpcserver.NewWSRPCFunc(c.UnsubscribeWS, "query"), - "unsubscribe_all": rpcserver.NewWSRPCFunc(c.UnsubscribeAllWS, ""), - - // info API - "health": rpcserver.NewRPCFunc(makeHealthFunc(c), "", false), - "status": rpcserver.NewRPCFunc(makeStatusFunc(c), "", false), - "net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), "", false), - "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", true), - "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", true), - "genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", true), - "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", true), - "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", true), - "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", true), - "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", true), - "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", true), - 
"tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by", false), - "block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by", false), - "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page", true), - "dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), "", false), - "consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), "", false), - "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", true), - "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit", false), - "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), "", false), - - // tx broadcast API - "broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx", false), - "broadcast_tx_sync": rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx", false), - "broadcast_tx_async": rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx", false), - - // abci API - "abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove", false), - "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", true), - - // evidence API - "broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence", false), - } -} - -type rpcHealthFunc func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) - -func makeHealthFunc(c *lrpc.Client) rpcHealthFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { - return c.Health(ctx.Context()) - } -} - -type rpcStatusFunc func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) - -// nolint: interfacer -func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { - return c.Status(ctx.Context()) - } +// proxyService wraps a light RPC client to export the RPC service interfaces. 
+// This is needed because the service and the client use different signatures +// for some of the methods. +type proxyService struct { + *lrpc.Client } -type rpcNetInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) - -func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { - return c.NetInfo(ctx.Context()) - } +func (p proxyService) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error) { + return p.ABCIQueryWithOptions(ctx, path, data, rpcclient.ABCIQueryOptions{ + Height: height, + Prove: prove, + }) } -type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) //nolint:lll - -func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc { - return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight) - } +func (p proxyService) GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { + return p.ConsensusState(ctx) } -type rpcGenesisFunc func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) - -func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { - return c.Genesis(ctx.Context()) - } +func (p proxyService) Subscribe(ctx context.Context, query string) (*coretypes.ResultSubscribe, error) { + return p.SubscribeWS(ctx, query) } -type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) - -func makeGenesisChunkedFunc(c *lrpc.Client) rpcGenesisChunkedFunc { - return func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { - return c.GenesisChunked(ctx.Context(), chunk) - } +func (p proxyService) Unsubscribe(ctx context.Context, query string) 
(*coretypes.ResultUnsubscribe, error) { + return p.UnsubscribeWS(ctx, query) } -type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) - -func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) { - return c.Block(ctx.Context(), height) - } +func (p proxyService) UnsubscribeAll(ctx context.Context) (*coretypes.ResultUnsubscribe, error) { + return p.UnsubscribeAllWS(ctx) } -type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) - -func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc { - return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) { - return c.BlockByHash(ctx.Context(), hash) - } -} - -type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) - -func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) { - return c.BlockResults(ctx.Context(), height) - } -} - -type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) - -func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) { - return c.Commit(ctx.Context(), height) - } -} - -type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) - -func makeTxFunc(c *lrpc.Client) rpcTxFunc { - return func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { - return c.Tx(ctx.Context(), hash, prove) - } -} - -type rpcTxSearchFunc func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, -) (*coretypes.ResultTxSearch, error) - -func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { - return func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage 
*int, - orderBy string, - ) (*coretypes.ResultTxSearch, error) { - return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy) - } -} - -type rpcBlockSearchFunc func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, -) (*coretypes.ResultBlockSearch, error) - -func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { - return func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, - ) (*coretypes.ResultBlockSearch, error) { - return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy) - } -} - -type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage *int) (*coretypes.ResultValidators, error) - -func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { - return c.Validators(ctx.Context(), height, page, perPage) - } -} - -type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) - -func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { - return c.DumpConsensusState(ctx.Context()) - } -} - -type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) - -func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { - return c.ConsensusState(ctx.Context()) - } -} - -type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) - -func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - return c.ConsensusParams(ctx.Context(), height) - } -} - -type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, 
limit *int) (*coretypes.ResultUnconfirmedTxs, error) - -func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - return c.UnconfirmedTxs(ctx.Context(), limit) - } -} - -type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) - -func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { - return c.NumUnconfirmedTxs(ctx.Context()) - } -} - -type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) - -func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - return c.BroadcastTxCommit(ctx.Context(), tx) - } -} - -type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) - -func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.BroadcastTxSync(ctx.Context(), tx) - } -} - -type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) - -func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.BroadcastTxAsync(ctx.Context(), tx) - } -} - -type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, - data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error) - -func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { - return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, - height int64, prove bool) (*coretypes.ResultABCIQuery, error) { - - return c.ABCIQueryWithOptions(ctx.Context(), path, data, 
rpcclient.ABCIQueryOptions{ - Height: height, - Prove: prove, - }) - } -} - -type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) - -func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { - return c.ABCIInfo(ctx.Context()) - } -} - -type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) - -// nolint: interfacer -func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { - return func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - return c.BroadcastEvidence(ctx.Context(), ev) - } +func (p proxyService) BroadcastEvidence(ctx context.Context, ev coretypes.Evidence) (*coretypes.ResultBroadcastEvidence, error) { + return p.Client.BroadcastEvidence(ctx, ev.Value) } diff --git a/light/rpc/client.go b/light/rpc/client.go index dc745542e0..cfbdaa409a 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -13,6 +13,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" @@ -31,6 +32,7 @@ type LightClient interface { Update(ctx context.Context, now time.Time) (*types.LightBlock, error) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) TrustedLightBlock(height int64) (*types.LightBlock, error) + Status(ctx context.Context) *types.LightClientInfo } var _ rpcclient.Client = (*Client)(nil) @@ -47,6 +49,9 @@ type Client struct { // proof runtime used to verify values returned by ABCIQuery prt *merkle.ProofRuntime keyPathFn KeyPathFunc + + closers []func() + quitCh chan struct{} } 
var _ rpcclient.Client = (*Client)(nil) @@ -85,36 +90,53 @@ func DefaultMerkleKeyPathFn() KeyPathFunc { } // NewClient returns a new client. -func NewClient(next rpcclient.Client, lc LightClient, opts ...Option) *Client { +func NewClient(logger log.Logger, next rpcclient.Client, lc LightClient, opts ...Option) *Client { c := &Client{ - next: next, - lc: lc, - prt: merkle.DefaultProofRuntime(), + next: next, + lc: lc, + prt: merkle.DefaultProofRuntime(), + quitCh: make(chan struct{}), } - c.BaseService = *service.NewBaseService(nil, "Client", c) + c.BaseService = *service.NewBaseService(logger, "Client", c) for _, o := range opts { o(c) } return c } -func (c *Client) OnStart() error { - if !c.next.IsRunning() { - return c.next.Start() +func (c *Client) OnStart(ctx context.Context) error { + nctx, ncancel := context.WithCancel(ctx) + if err := c.next.Start(nctx); err != nil { + ncancel() + return err } + c.closers = append(c.closers, ncancel) + go func() { + defer close(c.quitCh) + c.Wait() + }() + return nil } func (c *Client) OnStop() { - if c.next.IsRunning() { - if err := c.next.Stop(); err != nil { - c.Logger.Error("Error stopping on next", "err", err) - } + for _, closer := range c.closers { + closer() } } +// Returns the status of the light client. Previously this was querying the primary connected to the client +// As a consequence of this change, running /status on the light client will return nil for SyncInfo, NodeInfo +// and ValidatorInfo. 
func (c *Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { - return c.next.Status(ctx) + lightClientInfo := c.lc.Status(ctx) + + return &coretypes.ResultStatus{ + NodeInfo: types.NodeInfo{}, + SyncInfo: coretypes.SyncInfo{}, + ValidatorInfo: coretypes.ValidatorInfo{}, + LightClientInfo: *lightClientInfo, + }, nil } func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { @@ -122,17 +144,29 @@ func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error } // ABCIQuery requests proof by default. -func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*coretypes.ResultABCIQuery, error) { //nolint:lll +func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } // ABCIQueryWithOptions returns an error if opts.Prove is false. +// ABCIQueryWithOptions returns the result for the given height (opts.Height). +// If no height is provided, the results of the block preceding the latest are returned. func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { // always request the proof opts.Prove = true + // Can't return the latest block results because we won't be able to + // prove them. Return the results for the previous block instead. + if opts.Height == 0 { + res, err := c.next.Status(ctx) + if err != nil { + return nil, fmt.Errorf("can't get latest height: %w", err) + } + opts.Height = res.SyncInfo.LatestBlockHeight - 1 + } + res, err := c.next.ABCIQueryWithOptions(ctx, path, data, opts) if err != nil { return nil, err @@ -162,24 +196,25 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb } // Validate the value proof against the trusted header. 
- if resp.Value != nil { - // 1) build a Merkle key path from path and resp.Key - if c.keyPathFn == nil { - return nil, errors.New("please configure Client with KeyPathFn option") - } - kp, err := c.keyPathFn(path, resp.Key) - if err != nil { - return nil, fmt.Errorf("can't build merkle key path: %w", err) - } + // build a Merkle key path from path and resp.Key + if c.keyPathFn == nil { + return nil, errors.New("please configure Client with KeyPathFn option") + } - // 2) verify value + kp, err := c.keyPathFn(path, resp.Key) + if err != nil { + return nil, fmt.Errorf("can't build merkle key path: %w", err) + } + + // verify value + if resp.Value != nil { err = c.prt.VerifyValue(resp.ProofOps, l.AppHash, kp.String(), resp.Value) if err != nil { return nil, fmt.Errorf("verify value proof: %w", err) } } else { // OR validate the absence proof against the trusted header. - err = c.prt.VerifyAbsence(resp.ProofOps, l.AppHash, string(resp.Key)) + err = c.prt.VerifyAbsence(resp.ProofOps, l.AppHash, kp.String()) if err != nil { return nil, fmt.Errorf("verify absence proof: %w", err) } @@ -200,8 +235,8 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R return c.next.BroadcastTxSync(ctx, tx) } -func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - return c.next.UnconfirmedTxs(ctx, limit) +func (c *Client) UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { + return c.next.UnconfirmedTxs(ctx, page, perPage) } func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { @@ -263,7 +298,7 @@ func (c *Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { // BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header // returned. 
-func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll +func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight) if err != nil { return nil, err @@ -442,6 +477,40 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.Re return res, nil } +// Header fetches and verifies the header directly via the light client +func (c *Client) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { + lb, err := c.updateLightClientIfNeededTo(ctx, height) + if err != nil { + return nil, err + } + + return &coretypes.ResultHeader{Header: lb.Header}, nil +} + +// HeaderByHash calls rpcclient#HeaderByHash and updates the client if it's falling behind. +func (c *Client) HeaderByHash(ctx context.Context, hash tmbytes.HexBytes) (*coretypes.ResultHeader, error) { + res, err := c.next.HeaderByHash(ctx, hash) + if err != nil { + return nil, err + } + + if err := res.Header.ValidateBasic(); err != nil { + return nil, err + } + + lb, err := c.updateLightClientIfNeededTo(ctx, &res.Header.Height) + if err != nil { + return nil, err + } + + if !bytes.Equal(lb.Header.Hash(), res.Header.Hash()) { + return nil, fmt.Errorf("primary header hash does not match trusted header hash. (%X != %X)", + lb.Header.Hash(), res.Header.Hash()) + } + + return res, nil +} + func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { // Update the light client if we're behind and retrieve the light block at the requested height // or at the latest height if no height is provided. 
@@ -520,13 +589,14 @@ func (c *Client) Validators( } skipCount := validateSkipCount(page, perPage) - v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(int(perPage), totalCount-skipCount)] return &coretypes.ResultValidators{ BlockHeight: l.Height, Validators: v, Count: len(v), - Total: totalCount}, nil + Total: totalCount, + }, nil } func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { @@ -569,8 +639,12 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { // SubscribeWS subscribes for events using the given query and remote address as // a subscriber, but does not verify responses (UNSAFE)! // TODO: verify data -func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { - out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) +func (c *Client) SubscribeWS(ctx context.Context, query string) (*coretypes.ResultSubscribe, error) { + bctx, bcancel := context.WithCancel(context.Background()) + c.closers = append(c.closers, bcancel) + + callInfo := rpctypes.GetCallInfo(ctx) + out, err := c.next.Subscribe(bctx, callInfo.RemoteAddr(), query) if err != nil { return nil, err } @@ -581,12 +655,8 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.Re case resultEvent := <-out: // We should have a switch here that performs a validation // depending on the event's type. 
- ctx.WSConn.TryWriteRPCResponse( - rpctypes.NewRPCSuccessResponse( - rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)), - resultEvent, - )) - case <-c.Quit(): + callInfo.WSConn.TryWriteRPCResponse(bctx, callInfo.RPCRequest.MakeResponse(resultEvent)) + case <-bctx.Done(): return } } @@ -597,8 +667,8 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.Re // UnsubscribeWS calls original client's Unsubscribe using remote address as a // subscriber. -func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { - err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) +func (c *Client) UnsubscribeWS(ctx context.Context, query string) (*coretypes.ResultUnsubscribe, error) { + err := c.next.Unsubscribe(context.Background(), rpctypes.GetCallInfo(ctx).RemoteAddr(), query) if err != nil { return nil, err } @@ -607,8 +677,8 @@ func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes. // UnsubscribeAllWS calls original client's UnsubscribeAll using remote address // as a subscriber. 
-func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { - err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) +func (c *Client) UnsubscribeAllWS(ctx context.Context) (*coretypes.ResultUnsubscribe, error) { + err := c.next.UnsubscribeAll(context.Background(), rpctypes.GetCallInfo(ctx).RemoteAddr()) if err != nil { return nil, err } @@ -622,16 +692,13 @@ const ( maxPerPage = 100 ) -func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { - if perPage < 1 { - panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage)) - } +func validatePage(pagePtr *int, perPage uint, totalCount int) (int, error) { if pagePtr == nil { // no page parameter return 1, nil } - pages := ((totalCount - 1) / perPage) + 1 + pages := ((totalCount - 1) / int(perPage)) + 1 if pages == 0 { pages = 1 // one page (even if it's empty) } @@ -643,7 +710,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { return page, nil } -func validatePerPage(perPagePtr *int) int { +func validatePerPage(perPagePtr *int) uint { if perPagePtr == nil { // no per_page parameter return defaultPerPage } @@ -654,11 +721,11 @@ func validatePerPage(perPagePtr *int) int { } else if perPage > maxPerPage { return maxPerPage } - return perPage + return uint(perPage) } -func validateSkipCount(page, perPage int) int { - skipCount := (page - 1) * perPage +func validateSkipCount(page int, perPage uint) int { + skipCount := (page - 1) * int(perPage) if skipCount < 0 { return 0 } diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index cc32cf6494..347d147073 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -31,6 +31,22 @@ func (_m *LightClient) ChainID() string { return r0 } +// Status provides a mock function with given fields: ctx +func (_m *LightClient) Status(ctx context.Context) *types.LightClientInfo { + ret := _m.Called(ctx) + + var r0 
*types.LightClientInfo + if rf, ok := ret.Get(0).(func(context.Context) *types.LightClientInfo); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.LightClientInfo) + } + } + + return r0 +} + // TrustedLightBlock provides a mock function with given fields: height func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { ret := _m.Called(height) diff --git a/light/store/db/db.go b/light/store/db/db.go index acfda1f796..c364e17092 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -3,11 +3,11 @@ package db import ( "encoding/binary" "fmt" + "sync" "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/light/store" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -21,7 +21,7 @@ const ( type dbs struct { db dbm.DB - mtx tmsync.RWMutex + mtx sync.RWMutex size uint16 } diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index 7f963eb92a..7069eb11da 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -1,6 +1,7 @@ package db import ( + "context" "sync" "testing" "time" @@ -19,6 +20,8 @@ import ( func TestLast_FirstLightBlockHeight(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Empty store height, err := dbStore.LastLightBlockHeight() @@ -30,7 +33,7 @@ func TestLast_FirstLightBlockHeight(t *testing.T) { assert.EqualValues(t, -1, height) // 1 key - err = dbStore.SaveLightBlock(randLightBlock(int64(1))) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, int64(1))) require.NoError(t, err) height, err = dbStore.LastLightBlockHeight() @@ -44,6 +47,8 @@ func TestLast_FirstLightBlockHeight(t *testing.T) { func Test_SaveLightBlock(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() // Empty store h, err := dbStore.LightBlock(1) @@ -51,7 +56,7 @@ func Test_SaveLightBlock(t *testing.T) { assert.Nil(t, h) // 1 key - err = dbStore.SaveLightBlock(randLightBlock(1)) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, 1)) require.NoError(t, err) size := dbStore.Size() @@ -74,13 +79,15 @@ func Test_SaveLightBlock(t *testing.T) { func Test_LightBlockBefore(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() assert.Panics(t, func() { _, _ = dbStore.LightBlockBefore(0) _, _ = dbStore.LightBlockBefore(100) }) - err := dbStore.SaveLightBlock(randLightBlock(int64(2))) + err := dbStore.SaveLightBlock(randLightBlock(ctx, t, int64(2))) require.NoError(t, err) h, err := dbStore.LightBlockBefore(3) @@ -95,6 +102,8 @@ func Test_LightBlockBefore(t *testing.T) { func Test_Prune(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Empty store assert.EqualValues(t, 0, dbStore.Size()) @@ -102,7 +111,7 @@ func Test_Prune(t *testing.T) { require.NoError(t, err) // One header - err = dbStore.SaveLightBlock(randLightBlock(2)) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, 2)) require.NoError(t, err) assert.EqualValues(t, 1, dbStore.Size()) @@ -117,7 +126,7 @@ func Test_Prune(t *testing.T) { // Multiple headers for i := 1; i <= 10; i++ { - err = dbStore.SaveLightBlock(randLightBlock(int64(i))) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, int64(i))) require.NoError(t, err) } @@ -133,13 +142,16 @@ func Test_Prune(t *testing.T) { func Test_Concurrency(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup for i := 1; i <= 100; i++ { wg.Add(1) go func(i int64) { defer wg.Done() - err := dbStore.SaveLightBlock(randLightBlock(i)) + err := 
dbStore.SaveLightBlock(randLightBlock(ctx, t, i)) require.NoError(t, err) _, err = dbStore.LightBlock(i) @@ -182,8 +194,9 @@ func Test_Concurrency(t *testing.T) { wg.Wait() } -func randLightBlock(height int64) *types.LightBlock { - vals, _ := factory.RandValidatorSet(2, 1) +func randLightBlock(ctx context.Context, t *testing.T, height int64) *types.LightBlock { + t.Helper() + vals, _ := factory.ValidatorSet(ctx, t, 2, 1) return &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: &types.Header{ diff --git a/light/verifier.go b/light/verifier.go index ee4bfb0534..f6156c5de4 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -38,9 +38,12 @@ func VerifyNonAdjacent( trustingPeriod time.Duration, now time.Time, maxClockDrift time.Duration, - trustLevel tmmath.Fraction) error { + trustLevel tmmath.Fraction, +) error { - checkRequiredHeaderFields(trustedHeader) + if err := checkRequiredHeaderFields(trustedHeader); err != nil { + return err + } if untrustedHeader.Height == trustedHeader.Height+1 { return errors.New("headers must be non adjacent in height") @@ -106,12 +109,15 @@ func VerifyAdjacent( untrustedVals *types.ValidatorSet, // height=X+1 trustingPeriod time.Duration, now time.Time, - maxClockDrift time.Duration) error { + maxClockDrift time.Duration, +) error { - checkRequiredHeaderFields(trustedHeader) + if err := checkRequiredHeaderFields(trustedHeader); err != nil { + return err + } if len(trustedHeader.NextValidatorsHash) == 0 { - panic("next validators hash in trusted header is empty") + return errors.New("next validators hash in trusted header is empty") } if untrustedHeader.Height != trustedHeader.Height+1 { @@ -268,17 +274,18 @@ func verifyNewHeaderAndVals( return nil } -func checkRequiredHeaderFields(h *types.SignedHeader) { +func checkRequiredHeaderFields(h *types.SignedHeader) error { if h.Height == 0 { - panic("height in trusted header must be set (non zero") + return errors.New("height in trusted header must be set (non zero") } 
zeroTime := time.Time{} if h.Time == zeroTime { - panic("time in trusted header must be set") + return errors.New("time in trusted header must be set") } if h.ChainID == "" { - panic("chain ID in trusted header must be set") + return errors.New("chain ID in trusted header must be set") } + return nil } diff --git a/light/verifier_test.go b/light/verifier_test.go index 0432c130d9..5a2019e21f 100644 --- a/light/verifier_test.go +++ b/light/verifier_test.go @@ -28,7 +28,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") - header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, + header = keys.GenSignedHeader(t, chainID, lastHeight, bTime, nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) ) @@ -51,7 +51,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // different chainID -> error 1: { - keys.GenSignedHeader("different-chainID", nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, "different-chainID", nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, @@ -61,7 +61,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // new header's time is before old header's time -> error 2: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(-1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(-1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 4 * time.Hour, @@ -71,7 +71,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // new header's time is from the future -> error 3: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(3*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(3*time.Hour), nil, vals, 
vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, @@ -81,7 +81,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // new header's time is from the future, but it's acceptable (< maxClockDrift) -> no error 4: { - keys.GenSignedHeader(chainID, nextHeight, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(2*time.Hour).Add(maxClockDrift).Add(-1*time.Millisecond), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, @@ -92,7 +92,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // 3/3 signed -> no error 5: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, @@ -102,7 +102,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // 2/3 signed -> no error 6: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 1, len(keys)), vals, 3 * time.Hour, @@ -112,7 +112,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // 1/3 signed -> error 7: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), vals, 3 * time.Hour, @@ -122,7 +122,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // vals does not match with what we have -> error 8: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals, hash("app_hash"), 
hash("cons_hash"), hash("results_hash"), 0, len(keys)), keys.ToValidators(10, 1), 3 * time.Hour, @@ -132,7 +132,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // vals are inconsistent with newHeader -> error 9: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), keys.ToValidators(10, 1), 3 * time.Hour, @@ -142,7 +142,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { }, // old header has expired -> error 10: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), keys.ToValidators(10, 1), 1 * time.Hour, @@ -180,7 +180,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") - header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, + header = keys.GenSignedHeader(t, chainID, lastHeight, bTime, nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) // 30, 40, 50 @@ -206,7 +206,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { }{ // 3/3 new vals signed, 3/3 old vals present -> no error 0: { - keys.GenSignedHeader(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, @@ -216,7 +216,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { }, // 2/3 new vals signed, 3/3 old vals present -> no error 1: { - keys.GenSignedHeader(chainID, 4, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, 4, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 1, len(keys)), vals, 3 * time.Hour, @@ -226,7 +226,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { }, // 1/3 new vals signed, 3/3 old vals present -> error 2: { - keys.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, vals, vals, + keys.GenSignedHeader(t, chainID, 5, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), vals, 3 * time.Hour, @@ -236,7 +236,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { }, // 3/3 new vals signed, 2/3 old vals present -> no error 3: { - twoThirds.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, twoThirdsVals, twoThirdsVals, + twoThirds.GenSignedHeader(t, chainID, 5, bTime.Add(1*time.Hour), nil, twoThirdsVals, twoThirdsVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(twoThirds)), twoThirdsVals, 3 * time.Hour, @@ -246,7 +246,7 @@ func TestVerifyNonAdjacentHeaders(t 
*testing.T) { }, // 3/3 new vals signed, 1/3 old vals present -> no error 4: { - oneThird.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, oneThirdVals, oneThirdVals, + oneThird.GenSignedHeader(t, chainID, 5, bTime.Add(1*time.Hour), nil, oneThirdVals, oneThirdVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(oneThird)), oneThirdVals, 3 * time.Hour, @@ -256,7 +256,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { }, // 3/3 new vals signed, less than 1/3 old vals present -> error 5: { - lessThanOneThird.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, lessThanOneThirdVals, lessThanOneThirdVals, + lessThanOneThird.GenSignedHeader(t, chainID, 5, bTime.Add(1*time.Hour), nil, lessThanOneThirdVals, lessThanOneThirdVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(lessThanOneThird)), lessThanOneThirdVals, 3 * time.Hour, @@ -296,7 +296,7 @@ func TestVerifyReturnsErrorIfTrustLevelIsInvalid(t *testing.T) { // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") - header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, + header = keys.GenSignedHeader(t, chainID, lastHeight, bTime, nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) ) diff --git a/networks/remote/README.md b/networks/remote/README.md index 8f2e047363..eab906e452 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](https://docs.tendermint.com/master/networks/terraform-and-ansible.html). +See the [docs](https://docs.tendermint.com/master/tools/terraform-and-ansible.html). 
diff --git a/networks/remote/ansible/config.yml b/networks/remote/ansible/config.yml index 7b772fb706..43f08c979d 100644 --- a/networks/remote/ansible/config.yml +++ b/networks/remote/ansible/config.yml @@ -1,8 +1,6 @@ --- - -#Requires BINARY and CONFIGDIR variables set. -#N=4 hosts by default. - +# Requires BINARY and CONFIGDIR variables set. +# N=4 hosts by default. - hosts: all user: root any_errors_fatal: true @@ -15,4 +13,3 @@ - config - unsafe_reset - start - diff --git a/networks/remote/ansible/install.yml b/networks/remote/ansible/install.yml index a57b4be444..9920fba157 100644 --- a/networks/remote/ansible/install.yml +++ b/networks/remote/ansible/install.yml @@ -1,5 +1,4 @@ --- - - hosts: all user: root any_errors_fatal: true @@ -8,4 +7,3 @@ - service: tendermint roles: - install - diff --git a/networks/remote/ansible/logzio.yml b/networks/remote/ansible/logzio.yml index 53f637f2fd..4636df5fb5 100644 --- a/networks/remote/ansible/logzio.yml +++ b/networks/remote/ansible/logzio.yml @@ -1,7 +1,5 @@ --- - -#Note: You need to add LOGZIO_TOKEN variable with your API key. Like tihs: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456 - +# Note: You need to add LOGZIO_TOKEN variable with your API key. 
Like tihs: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456 - hosts: all user: root any_errors_fatal: true @@ -11,4 +9,3 @@ - JOURNALBEAT_BINARY: "{{lookup('env', 'GOPATH')}}/bin/journalbeat" roles: - logzio - diff --git a/networks/remote/ansible/reset.yml b/networks/remote/ansible/reset.yml index 63b1733c78..76a27db2f6 100644 --- a/networks/remote/ansible/reset.yml +++ b/networks/remote/ansible/reset.yml @@ -1,5 +1,4 @@ --- - - hosts: all user: root any_errors_fatal: true @@ -10,5 +9,3 @@ - stop - unsafe_reset - start - - diff --git a/networks/remote/ansible/restart.yml b/networks/remote/ansible/restart.yml index 71d4bc66d8..540d8c690c 100644 --- a/networks/remote/ansible/restart.yml +++ b/networks/remote/ansible/restart.yml @@ -1,5 +1,4 @@ --- - - hosts: all user: root any_errors_fatal: true @@ -9,4 +8,3 @@ roles: - stop - start - diff --git a/networks/remote/ansible/roles/config/tasks/main.yml b/networks/remote/ansible/roles/config/tasks/main.yml index a51098caa2..aac02a7e53 100644 --- a/networks/remote/ansible/roles/config/tasks/main.yml +++ b/networks/remote/ansible/roles/config/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Copy binary copy: src: "{{BINARY}}" @@ -13,5 +12,4 @@ dest: "/home/{{service}}/.{{service}}/" owner: "{{service}}" group: "{{service}}" - loop: [ 0, 1, 2, 3, 4, 5, 6, 7 ] - + loop: [0, 1, 2, 3, 4, 5, 6, 7] diff --git a/networks/remote/ansible/roles/install/handlers/main.yml b/networks/remote/ansible/roles/install/handlers/main.yml index 16afbb6188..ab39f51ee6 100644 --- a/networks/remote/ansible/roles/install/handlers/main.yml +++ b/networks/remote/ansible/roles/install/handlers/main.yml @@ -1,5 +1,3 @@ --- - - name: reload services systemd: "name={{service}} daemon_reload=yes enabled=yes" - diff --git a/networks/remote/ansible/roles/install/tasks/main.yml b/networks/remote/ansible/roles/install/tasks/main.yml index 9e5a7524aa..effc3fb9f7 100644 --- a/networks/remote/ansible/roles/install/tasks/main.yml +++ 
b/networks/remote/ansible/roles/install/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Create service group group: "name={{service}}" @@ -12,4 +11,3 @@ - name: Create service template: "src=systemd.service.j2 dest=/etc/systemd/system/{{service}}.service" notify: reload services - diff --git a/networks/remote/ansible/roles/logzio/handlers/main.yml b/networks/remote/ansible/roles/logzio/handlers/main.yml index 0b371fc517..ad668d6296 100644 --- a/networks/remote/ansible/roles/logzio/handlers/main.yml +++ b/networks/remote/ansible/roles/logzio/handlers/main.yml @@ -1,8 +1,6 @@ --- - - name: reload daemon command: "systemctl daemon-reload" - name: restart journalbeat service: name=journalbeat state=restarted - diff --git a/networks/remote/ansible/roles/logzio/tasks/main.yml b/networks/remote/ansible/roles/logzio/tasks/main.yml index ab3976f22a..580c81e8a8 100644 --- a/networks/remote/ansible/roles/logzio/tasks/main.yml +++ b/networks/remote/ansible/roles/logzio/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Copy journalbeat binary copy: src="{{JOURNALBEAT_BINARY}}" dest=/usr/bin/journalbeat mode=0755 notify: restart journalbeat @@ -24,4 +23,3 @@ notify: - reload daemon - restart journalbeat - diff --git a/networks/remote/ansible/roles/start/tasks/main.yml b/networks/remote/ansible/roles/start/tasks/main.yml index 6bc611c91c..0d19efc7a4 100644 --- a/networks/remote/ansible/roles/start/tasks/main.yml +++ b/networks/remote/ansible/roles/start/tasks/main.yml @@ -1,5 +1,3 @@ --- - - name: start service service: "name={{service}} state=started" - diff --git a/networks/remote/ansible/roles/status/tasks/main.yml b/networks/remote/ansible/roles/status/tasks/main.yml index 50170c7464..1bd4039ead 100644 --- a/networks/remote/ansible/roles/status/tasks/main.yml +++ b/networks/remote/ansible/roles/status/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: application service status command: "service {{service}} status" changed_when: false @@ -7,4 +6,3 @@ - name: Result debug: 
var=status.stdout_lines - diff --git a/networks/remote/ansible/roles/stop/tasks/main.yml b/networks/remote/ansible/roles/stop/tasks/main.yml index 7db356f224..0bbcfbaac2 100644 --- a/networks/remote/ansible/roles/stop/tasks/main.yml +++ b/networks/remote/ansible/roles/stop/tasks/main.yml @@ -1,5 +1,3 @@ --- - - name: stop service service: "name={{service}} state=stopped" - diff --git a/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml b/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml index 6ac1ec55a2..59ae68d171 100644 --- a/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml +++ b/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml @@ -1,4 +1,3 @@ - command: "{{service}} unsafe_reset_all {{ (service != 'tendermint') | ternary('node','') }} --home /home/{{service}}/.{{service}}" become_user: "{{service}}" become: yes - diff --git a/networks/remote/ansible/start.yml b/networks/remote/ansible/start.yml index 2be07dc732..ceb72e63cb 100644 --- a/networks/remote/ansible/start.yml +++ b/networks/remote/ansible/start.yml @@ -1,5 +1,4 @@ --- - - hosts: all user: root any_errors_fatal: true @@ -8,4 +7,3 @@ - service: tendermint roles: - start - diff --git a/networks/remote/ansible/status.yml b/networks/remote/ansible/status.yml index a1721b87b6..ff01d227c5 100644 --- a/networks/remote/ansible/status.yml +++ b/networks/remote/ansible/status.yml @@ -1,5 +1,4 @@ --- - - hosts: all user: root any_errors_fatal: true @@ -8,4 +7,3 @@ - service: tendermint roles: - status - diff --git a/networks/remote/ansible/stop.yml b/networks/remote/ansible/stop.yml index abc6031d57..d269f8d9be 100644 --- a/networks/remote/ansible/stop.yml +++ b/networks/remote/ansible/stop.yml @@ -1,5 +1,4 @@ --- - - hosts: all user: root any_errors_fatal: true @@ -8,4 +7,3 @@ - service: tendermint roles: - stop - diff --git a/node/node.go b/node/node.go index 727722279a..4a900368d3 100644 --- a/node/node.go +++ b/node/node.go @@ -11,14 +11,16 @@ import ( 
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/rs/cors" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/internal/proxy" rpccore "github.com/tendermint/tendermint/internal/rpc/core" sm "github.com/tendermint/tendermint/internal/state" @@ -26,14 +28,10 @@ import ( "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - tmgrpc "github.com/tendermint/tendermint/privval/grpc" - rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port @@ -45,6 +43,7 @@ import ( // It includes all configuration information and running services. 
type nodeImpl struct { service.BaseService + logger log.Logger // config config *config.Config @@ -52,7 +51,6 @@ type nodeImpl struct { privValidator types.PrivValidator // local node's validator key // network - transport *p2p.MConnTransport peerManager *p2p.PeerManager router *p2p.Router nodeInfo types.NodeInfo @@ -60,55 +58,51 @@ type nodeImpl struct { isListening bool // services - eventBus *types.EventBus // pub/sub for services eventSinks []indexer.EventSink stateStore sm.Store - blockStore *store.BlockStore // store the blockchain to disk - bcReactor service.Service // for block-syncing - mempoolReactor service.Service // for gossipping transactions - mempool mempool.Mempool + blockStore *store.BlockStore // store the blockchain to disk stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - consensusReactor *consensus.Reactor // for participating in the consensus - pexReactor service.Service // for exchanging peer addresses - evidenceReactor service.Service - rpcListeners []net.Listener // rpc servers - shutdownOps closer - indexerService service.Service - rpcEnv *rpccore.Environment - prometheusSrv *http.Server + + services []service.Service + rpcListeners []net.Listener // rpc servers + shutdownOps closer + rpcEnv *rpccore.Environment + prometheusSrv *http.Server } // newDefaultNode returns a Tendermint node with default settings for the // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. 
-func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, error) { +func newDefaultNode( + ctx context.Context, + cfg *config.Config, + logger log.Logger, +) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) if err != nil { return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err) } if cfg.Mode == config.ModeSeed { - return makeSeedNode(cfg, + return makeSeedNode( + ctx, + cfg, config.DefaultDBProvider, nodeKey, defaultGenesisDocProviderFunc(cfg), logger, ) } - - var pval *privval.FilePV - if cfg.Mode == config.ModeValidator { - pval, err = privval.LoadOrGenFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) - if err != nil { - return nil, err - } - } else { - pval = nil + pval, err := makeDefaultPrivval(cfg) + if err != nil { + return nil, err } - appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + appClient, _ := proxy.DefaultClientCreator(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - return makeNode(cfg, + return makeNode( + ctx, + cfg, pval, nodeKey, appClient, @@ -119,15 +113,20 @@ func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, err } // makeNode returns a new, ready to go, Tendermint Node. 
-func makeNode(cfg *config.Config, - privValidator types.PrivValidator, +func makeNode( + ctx context.Context, + cfg *config.Config, + filePrivval *privval.FilePV, nodeKey types.NodeKey, clientCreator abciclient.Creator, genesisDocProvider genesisDocProvider, dbProvider config.DBProvider, logger log.Logger, ) (service.Service, error) { - closers := []closer{} + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + + closers := []closer{convertCancelCloser(cancel)} blockStore, stateDB, dbCloser, err := initDBs(cfg, dbProvider) if err != nil { @@ -152,58 +151,40 @@ func makeNode(cfg *config.Config, state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). - proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy) - if err != nil { - return nil, combineCloseError(err, makeCloser(closers)) - + proxyApp := proxy.NewAppConns(clientCreator, logger.With("module", "proxy"), nodeMetrics.proxy) + if err := proxyApp.Start(ctx); err != nil { + return nil, fmt.Errorf("error starting proxy app connections: %w", err) } // EventBus and IndexerService must be started before the handshake because // we might need to index the txs of the replayed block as this might not have happened // when the node stopped last time (i.e. 
the node stopped after it saved the block // but before it indexed the txs, or, endblocker panicked) - eventBus, err := createAndStartEventBus(logger) - if err != nil { + eventBus := eventbus.NewDefault(logger.With("module", "events")) + if err := eventBus.Start(ctx); err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } - indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID) + indexerService, eventSinks, err := createAndStartIndexerService( + ctx, cfg, dbProvider, eventBus, + logger, genDoc.ChainID, nodeMetrics.indexer) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) } - // If an address is provided, listen on the socket for a connection from an - // external signing process. - if cfg.PrivValidator.ListenAddr != "" { - protocol, _ := tmnet.ProtocolAndAddress(cfg.PrivValidator.ListenAddr) - // FIXME: we should start services inside OnStart - switch protocol { - case "grpc": - privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, logger) - if err != nil { - return nil, combineCloseError( - fmt.Errorf("error with private validator grpc client: %w", err), - makeCloser(closers)) - } - default: - privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, logger) - if err != nil { - return nil, combineCloseError( - fmt.Errorf("error with private validator socket client: %w", err), - makeCloser(closers)) - } - } + privValidator, err := createPrivval(ctx, logger, cfg, genDoc, filePrivval) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) } + var pubKey crypto.PubKey if cfg.Mode == config.ModeValidator { - pubKey, err = privValidator.GetPubKey(context.TODO()) + pubKey, err = privValidator.GetPubKey(ctx) if err != nil { return nil, combineCloseError(fmt.Errorf("can't get pubkey: %w", err), makeCloser(closers)) @@ -225,11 +206,12 @@ func makeNode(cfg *config.Config, // Create 
the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync tendermint with the app. - consensusLogger := logger.With("module", "consensus") if !stateSync { - if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := consensus.NewHandshaker( + logger.With("module", "handshaker"), + stateStore, state, blockStore, eventBus, genDoc, + ).Handshake(ctx, proxyApp); err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } // Reload the state. It will have the Version.Consensus.App set by the @@ -245,21 +227,17 @@ func makeNode(cfg *config.Config, // Determine whether we should do block sync. This must happen after the handshake, since the // app may modify the validator set, specifying ourself as the only validator. - blockSync := cfg.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey) + blockSync := !onlyValidatorIsUs(state, pubKey) - logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode) + logNodeStartupInfo(state, pubKey, logger, cfg.Mode) // TODO: Fetch and provide real options and do proper p2p bootstrapping. // TODO: Use a persistent peer database. 
nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } - p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, cfg) - peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) closers = append(closers, peerCloser) if err != nil { @@ -268,28 +246,26 @@ func makeNode(cfg *config.Config, makeCloser(closers)) } - router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(cfg, proxyApp)) + router, err := createRouter(ctx, logger, nodeMetrics.p2p, nodeInfo, nodeKey, + peerManager, cfg, proxyApp) if err != nil { return nil, combineCloseError( fmt.Errorf("failed to create router: %w", err), makeCloser(closers)) } - mpReactor, mp, err := createMempoolReactor( + mpReactor, mp, err := createMempoolReactor(ctx, cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } - evReactor, evPool, err := createEvidenceReactor( + evReactor, evPool, err := createEvidenceReactor(ctx, cfg, dbProvider, stateDB, blockStore, peerManager, router, logger, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } // make block executor for consensus and blockchain reactors to execute blocks @@ -303,10 +279,10 @@ func makeNode(cfg *config.Config, sm.BlockExecutorWithMetrics(nodeMetrics.state), ) - csReactor, csState, err := createConsensusReactor( + csReactor, csState, err := createConsensusReactor(ctx, cfg, state, blockExec, blockStore, mp, evPool, privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, - peerManager, router, consensusLogger, + peerManager, router, logger, ) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) @@ -314,13 +290,21 @@ func makeNode(cfg *config.Config, // Create the blockchain reactor. 
Note, we do not start block sync if we're // doing a state sync first. - bcReactor, err := createBlockchainReactor( - logger, state, blockExec, blockStore, csReactor, - peerManager, router, blockSync && !stateSync, nodeMetrics.consensus, + bcReactor, err := blocksync.NewReactor(ctx, + logger.With("module", "blockchain"), + state.Copy(), + blockExec, + blockStore, + csReactor, + router.OpenChannel, + peerManager.Subscribe(ctx), + blockSync && !stateSync, + nodeMetrics.consensus, + eventBus, ) if err != nil { return nil, combineCloseError( - fmt.Errorf("could not create blockchain reactor: %w", err), + fmt.Errorf("could not create blocksync reactor: %w", err), makeCloser(closers)) } @@ -336,90 +320,60 @@ func makeNode(cfg *config.Config, // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 - ssLogger := logger.With("module", "statesync") - ssChDesc := statesync.GetChannelDescriptors() - channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc)) - for idx := range ssChDesc { - chd := ssChDesc[idx] - ch, err := router.OpenChannel(chd) - if err != nil { - return nil, err - } - - channels[ch.ID] = ch - } - - peerUpdates := peerManager.Subscribe() - stateSyncReactor := statesync.NewReactor( + stateSyncReactor, err := statesync.NewReactor( + ctx, genDoc.ChainID, genDoc.InitialHeight, *cfg.StateSync, - ssLogger, + logger.With("module", "statesync"), proxyApp.Snapshot(), proxyApp.Query(), - channels[statesync.SnapshotChannel], - channels[statesync.ChunkChannel], - channels[statesync.LightBlockChannel], - channels[statesync.ParamsChannel], - peerUpdates, + router.OpenChannel, + peerManager.Subscribe(ctx), stateStore, blockStore, cfg.StateSync.TempDir, nodeMetrics.statesync, + eventBus, ) - - // Optionally, start the pex reactor - // - // TODO: - // - // We need to set Seeds and PersistentPeers on the switch, - // since it needs to 
be able to use these (and their DNS names) - // even if the PEX is off. We can include the DNS name in the NetAddress, - // but it would still be nice to have a clear list of the current "PersistentPeers" - // somewhere that we can return with net_info. - // - // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. - // Note we currently use the addrBook regardless at least for AddOurAddress - - var pexReactor service.Service - - pexReactor, err = createPEXReactor(logger, peerManager, router) if err != nil { return nil, combineCloseError(err, makeCloser(closers)) - } - if cfg.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) - }() + var pexReactor service.Service + if cfg.P2P.PexReactor { + pexReactor, err = pex.NewReactor(ctx, logger, peerManager, router.OpenChannel, peerManager.Subscribe(ctx)) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) + } } - node := &nodeImpl{ config: cfg, + logger: logger, genesisDoc: genDoc, privValidator: privValidator, - transport: transport, peerManager: peerManager, router: router, nodeInfo: nodeInfo, nodeKey: nodeKey, + eventSinks: eventSinks, + + services: []service.Service{ + eventBus, + indexerService, + evReactor, + mpReactor, + csReactor, + bcReactor, + pexReactor, + }, + stateStore: stateStore, blockStore: blockStore, - bcReactor: bcReactor, - mempoolReactor: mpReactor, - mempool: mp, - consensusReactor: csReactor, stateSyncReactor: stateSyncReactor, stateSync: stateSync, - pexReactor: pexReactor, - evidenceReactor: evReactor, - indexerService: indexerService, - eventBus: eventBus, - eventSinks: eventSinks, shutdownOps: makeCloser(closers), @@ -433,7 +387,7 @@ func makeNode(cfg *config.Config, ConsensusState: csState, ConsensusReactor: csReactor, - BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor), + 
BlockSyncReactor: bcReactor, PeerManager: peerManager, @@ -446,6 +400,10 @@ func makeNode(cfg *config.Config, }, } + if cfg.Mode == config.ModeValidator { + node.rpcEnv.PubKey = pubKey + } + node.rpcEnv.P2PTransport = node node.BaseService = *service.NewBaseService(logger, "Node", node) @@ -453,152 +411,69 @@ func makeNode(cfg *config.Config, return node, nil } -// makeSeedNode returns a new seed node, containing only p2p, pex reactor -func makeSeedNode(cfg *config.Config, - dbProvider config.DBProvider, - nodeKey types.NodeKey, - genesisDocProvider genesisDocProvider, - logger log.Logger, -) (service.Service, error) { - - genDoc, err := genesisDocProvider() - if err != nil { - return nil, err - } - - state, err := sm.MakeGenesisState(genDoc) - if err != nil { - return nil, err - - } - - nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) - if err != nil { - return nil, err - } - - // Setup Transport and Switch. - p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, cfg) - - peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) - if err != nil { - return nil, combineCloseError( - fmt.Errorf("failed to create peer manager: %w", err), - closer) - } - - router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(cfg, nil)) - if err != nil { - return nil, combineCloseError( - fmt.Errorf("failed to create router: %w", err), - closer) - } - - var pexReactor service.Service - - pexReactor, err = createPEXReactor(logger, peerManager, router) - if err != nil { - return nil, combineCloseError(err, closer) - - } - - if cfg.RPC.PprofListenAddress != "" { +// OnStart starts the Node. It implements service.Service. 
+func (n *nodeImpl) OnStart(ctx context.Context) error { + if n.config.RPC.PprofListenAddress != "" { + rpcCtx, rpcCancel := context.WithCancel(ctx) + srv := &http.Server{Addr: n.config.RPC.PprofListenAddress, Handler: nil} go func() { - logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) + select { + case <-ctx.Done(): + sctx, scancel := context.WithTimeout(context.Background(), time.Second) + defer scancel() + _ = srv.Shutdown(sctx) + case <-rpcCtx.Done(): + } }() - } - - node := &nodeImpl{ - config: cfg, - genesisDoc: genDoc, - - transport: transport, - nodeInfo: nodeInfo, - nodeKey: nodeKey, - peerManager: peerManager, - router: router, - shutdownOps: closer, + go func() { + n.logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress) - pexReactor: pexReactor, + if err := srv.ListenAndServe(); err != nil { + n.logger.Error("pprof server error", "err", err) + rpcCancel() + } + }() } - node.BaseService = *service.NewBaseService(logger, "SeedNode", node) - - return node, nil -} -// OnStart starts the Node. It implements service.Service. -func (n *nodeImpl) OnStart() error { now := tmtime.Now() genTime := n.genesisDoc.GenesisTime if genTime.After(now) { - n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) + n.logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) time.Sleep(genTime.Sub(now)) } // Start the RPC server before the P2P server // so we can eg. 
receive txs for the first block - if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed { - listeners, err := n.startRPC() + if n.config.RPC.ListenAddress != "" { + var err error + n.rpcListeners, err = n.rpcEnv.StartService(ctx, n.config) if err != nil { return err } - n.rpcListeners = listeners } - if n.config.Instrumentation.Prometheus && - n.config.Instrumentation.PrometheusListenAddr != "" { - n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) + if n.config.Instrumentation.Prometheus && n.config.Instrumentation.PrometheusListenAddr != "" { + n.prometheusSrv = n.startPrometheusServer(ctx, n.config.Instrumentation.PrometheusListenAddr) } // Start the transport. - ep, err := p2p.NewEndpoint(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) - if err != nil { + if err := n.router.Start(ctx); err != nil { return err } - if err := n.transport.Listen(ep); err != nil { - return err - } - n.isListening = true - if err = n.router.Start(); err != nil { - return err - } - - if n.config.Mode != config.ModeSeed { - if n.config.BlockSync.Enable { - if err := n.bcReactor.Start(); err != nil { - return err + for _, reactor := range n.services { + if err := reactor.Start(ctx); err != nil { + if errors.Is(err, service.ErrAlreadyStarted) { + continue } - } - // Start the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Start(); err != nil { - return err - } - - // Start the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Start(); err != nil { - return err - } - - // Start the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Start(); err != nil { - return err - } - - // Start the real evidence reactor separately since the switch uses the shim. 
- if err := n.evidenceReactor.Start(); err != nil { - return err + return fmt.Errorf("problem starting service '%T': %w ", reactor, err) } } - if err := n.pexReactor.Start(); err != nil { + if err := n.stateSyncReactor.Start(ctx); err != nil { return err } @@ -606,10 +481,7 @@ func (n *nodeImpl) OnStart() error { // TODO: We shouldn't run state sync if we already have state that has a // LastBlockHeight that is not InitialHeight if n.stateSync { - bcR, ok := n.bcReactor.(consensus.BlockSyncReactor) - if !ok { - return fmt.Errorf("this blockchain reactor does not support switching from state sync") - } + bcR := n.rpcEnv.BlockSyncReactor // we need to get the genesis state to get parameters such as state, err := sm.MakeGenesisState(n.genesisDoc) @@ -622,51 +494,54 @@ func (n *nodeImpl) OnStart() error { // At the beginning of the statesync start, we use the initialHeight as the event height // because of the statesync doesn't have the concreate state height before fetched the snapshot. d := types.EventDataStateSyncStatus{Complete: false, Height: state.InitialHeight} - if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err) + if err := n.stateSyncReactor.PublishStatus(ctx, d); err != nil { + n.logger.Error("failed to emit the statesync start event", "err", err) } - // FIXME: We shouldn't allow state sync to silently error out without - // bubbling up the error and gracefully shutting down the rest of the node - go func() { - n.Logger.Info("starting state sync") - state, err := n.stateSyncReactor.Sync(context.TODO()) - if err != nil { - n.Logger.Error("state sync failed; shutting down this node", "err", err) - // stop the node - if err := n.Stop(); err != nil { - n.Logger.Error("failed to shut down node", "err", err) - } - return + // RUN STATE SYNC NOW: + // + // TODO: Eventually this should run as part of some + // separate orchestrator + n.logger.Info("starting state sync") + 
ssState, err := n.stateSyncReactor.Sync(ctx) + if err != nil { + n.logger.Error("state sync failed; shutting down this node", "err", err) + // stop the node + if err := n.Stop(); err != nil { + n.logger.Error("failed to shut down node", "err", err) } + return err + } - n.consensusReactor.SetStateSyncingMetrics(0) + n.rpcEnv.ConsensusReactor.SetStateSyncingMetrics(0) - d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err) - } + if err := n.stateSyncReactor.PublishStatus(ctx, + types.EventDataStateSyncStatus{ + Complete: true, + Height: ssState.LastBlockHeight, + }); err != nil { + n.logger.Error("failed to emit the statesync start event", "err", err) + return err + } - // TODO: Some form of orchestrator is needed here between the state - // advancing reactors to be able to control which one of the three - // is running - if n.config.BlockSync.Enable { - // FIXME Very ugly to have these metrics bleed through here. - n.consensusReactor.SetBlockSyncingMetrics(1) - if err := bcR.SwitchToBlockSync(state); err != nil { - n.Logger.Error("failed to switch to block sync", "err", err) - return - } - - d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) - } - - } else { - n.consensusReactor.SwitchToConsensus(state, true) - } - }() + // TODO: Some form of orchestrator is needed here between the state + // advancing reactors to be able to control which one of the three + // is running + // FIXME Very ugly to have these metrics bleed through here. 
+ n.rpcEnv.ConsensusReactor.SetBlockSyncingMetrics(1) + if err := bcR.SwitchToBlockSync(ctx, ssState); err != nil { + n.logger.Error("failed to switch to block sync", "err", err) + return err + } + + if err := bcR.PublishStatus(ctx, + types.EventDataBlockSyncStatus{ + Complete: false, + Height: ssState.LastBlockHeight, + }); err != nil { + n.logger.Error("failed to emit the block sync starting event", "err", err) + return err + } } return nil @@ -674,197 +549,61 @@ func (n *nodeImpl) OnStart() error { // OnStop stops the Node. It implements service.Service. func (n *nodeImpl) OnStop() { - - n.Logger.Info("Stopping Node") - - if n.eventBus != nil { - // first stop the non-reactor services - if err := n.eventBus.Stop(); err != nil { - n.Logger.Error("Error closing eventBus", "err", err) - } - } - if n.indexerService != nil { - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) - } - } + n.logger.Info("Stopping Node") for _, es := range n.eventSinks { if err := es.Stop(); err != nil { - n.Logger.Error("failed to stop event sink", "err", err) - } - } - - if n.config.Mode != config.ModeSeed { - // now stop the reactors - if n.config.BlockSync.Enable { - // Stop the real blockchain reactor separately since the switch uses the shim. - if err := n.bcReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the blockchain reactor", "err", err) - } - } - - // Stop the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the consensus reactor", "err", err) - } - - // Stop the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the state sync reactor", "err", err) - } - - // Stop the real mempool reactor separately since the switch uses the shim. 
- if err := n.mempoolReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the mempool reactor", "err", err) + n.logger.Error("failed to stop event sink", "err", err) } - - // Stop the real evidence reactor separately since the switch uses the shim. - if err := n.evidenceReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the evidence reactor", "err", err) - } - } - - if err := n.pexReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) } - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) - } - - if err := n.transport.Close(); err != nil { - n.Logger.Error("Error closing transport", "err", err) + for _, reactor := range n.services { + reactor.Wait() } + n.stateSyncReactor.Wait() + n.router.Wait() n.isListening = false // finally stop the listeners / external services for _, l := range n.rpcListeners { - n.Logger.Info("Closing rpc listener", "listener", l) + n.logger.Info("Closing rpc listener", "listener", l) if err := l.Close(); err != nil { - n.Logger.Error("Error closing listener", "listener", l, "err", err) + n.logger.Error("error closing listener", "listener", l, "err", err) } } if pvsc, ok := n.privValidator.(service.Service); ok { - if err := pvsc.Stop(); err != nil { - n.Logger.Error("Error closing private validator", "err", err) - } + pvsc.Wait() } if n.prometheusSrv != nil { if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { // Error from closing listeners, or context timeout: - n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + n.logger.Error("Prometheus HTTP server Shutdown", "err", err) } } if err := n.shutdownOps(); err != nil { - n.Logger.Error("problem shutting down additional services", "err", err) - } -} - -func (n *nodeImpl) startRPC() ([]net.Listener, error) { - if n.config.Mode == config.ModeValidator { - pubKey, err := n.privValidator.GetPubKey(context.TODO()) - if pubKey == nil || err != nil { - return nil, 
fmt.Errorf("can't get pubkey: %w", err) + if strings.TrimSpace(err.Error()) != "" { + n.logger.Error("problem shutting down additional services", "err", err) } - n.rpcEnv.PubKey = pubKey - } - if err := n.rpcEnv.InitGenesisChunks(); err != nil { - return nil, err - } - - listenAddrs := strings.SplitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - routes := n.rpcEnv.GetRoutes() - - if n.config.RPC.Unsafe { - n.rpcEnv.AddUnsafe(routes) } - - cfg := rpcserver.DefaultConfig() - cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes - cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - cfg.MaxOpenConnections = n.config.RPC.MaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - - // we may expose the rpc over both a unix and tcp socket - listeners := make([]net.Listener, len(listenAddrs)) - for i, listenAddr := range listenAddrs { - mux := http.NewServeMux() - rpcLogger := n.Logger.With("module", "rpc-server") - wmLogger := rpcLogger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(routes, - rpcserver.OnDisconnect(func(remoteAddr string) { - err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) - if err != nil && err != tmpubsub.ErrSubscriptionNotFound { - wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) - } - }), - rpcserver.ReadLimit(cfg.MaxBodyBytes), - ) - wm.SetLogger(wmLogger) - mux.HandleFunc("/websocket", wm.WebsocketHandler) - rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) - listener, err := rpcserver.Listen( - listenAddr, - cfg.MaxOpenConnections, - ) - if err != nil { - return nil, err + if n.blockStore != nil { + if err := n.blockStore.Close(); err != nil { + n.logger.Error("problem closing blockstore", "err", err) } - - 
var rootHandler http.Handler = mux - if n.config.RPC.IsCorsEnabled() { - corsMiddleware := cors.New(cors.Options{ - AllowedOrigins: n.config.RPC.CORSAllowedOrigins, - AllowedMethods: n.config.RPC.CORSAllowedMethods, - AllowedHeaders: n.config.RPC.CORSAllowedHeaders, - }) - rootHandler = corsMiddleware.Handler(mux) - } - if n.config.RPC.IsTLSEnabled() { - go func() { - if err := rpcserver.ServeTLS( - listener, - rootHandler, - n.config.RPC.CertFile(), - n.config.RPC.KeyFile(), - rpcLogger, - cfg, - ); err != nil { - n.Logger.Error("Error serving server with TLS", "err", err) - } - }() - } else { - go func() { - if err := rpcserver.Serve( - listener, - rootHandler, - rpcLogger, - cfg, - ); err != nil { - n.Logger.Error("Error serving server", "err", err) - } - }() + } + if n.stateStore != nil { + if err := n.stateStore.Close(); err != nil { + n.logger.Error("problem closing statestore", "err", err) } - - listeners[i] = listener } - - return listeners, nil } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics // collectors on addr. -func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { +func (n *nodeImpl) startPrometheusServer(ctx context.Context, addr string) *http.Server { srv := &http.Server{ Addr: addr, Handler: promhttp.InstrumentMetricHandler( @@ -874,34 +613,31 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { ), ), } + + promCtx, promCancel := context.WithCancel(ctx) go func() { - if err := srv.ListenAndServe(); err != http.ErrServerClosed { - // Error starting or closing listener: - n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + select { + case <-ctx.Done(): + sctx, scancel := context.WithTimeout(context.Background(), time.Second) + defer scancel() + _ = srv.Shutdown(sctx) + case <-promCtx.Done(): } }() - return srv -} -// ConsensusReactor returns the Node's ConsensusReactor. 
-func (n *nodeImpl) ConsensusReactor() *consensus.Reactor { - return n.consensusReactor -} + go func() { + if err := srv.ListenAndServe(); err != nil { + n.logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + promCancel() + } + }() -// Mempool returns the Node's mempool. -func (n *nodeImpl) Mempool() mempool.Mempool { - return n.mempool + return srv } // EventBus returns the Node's EventBus. -func (n *nodeImpl) EventBus() *types.EventBus { - return n.eventBus -} - -// PrivValidator returns the Node's PrivValidator. -// XXX: for convenience only! -func (n *nodeImpl) PrivValidator() types.PrivValidator { - return n.privValidator +func (n *nodeImpl) EventBus() *eventbus.EventBus { + return n.rpcEnv.EventBus } // GenesisDoc returns the Node's GenesisDoc. @@ -946,11 +682,12 @@ func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider { type nodeMetrics struct { consensus *consensus.Metrics - p2p *p2p.Metrics + indexer *indexer.Metrics mempool *mempool.Metrics + p2p *p2p.Metrics + proxy *proxy.Metrics state *sm.Metrics statesync *statesync.Metrics - proxy *proxy.Metrics } // metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. 
@@ -962,21 +699,23 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { return func(chainID string) *nodeMetrics { if cfg.Prometheus { return &nodeMetrics{ - consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + consensus: consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + indexer: indexer.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + mempool: mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + p2p: p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + proxy: proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + state: sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + statesync: statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), } } return &nodeMetrics{ - consensus.NopMetrics(), - p2p.NopMetrics(), - mempool.NopMetrics(), - sm.NopMetrics(), - statesync.NopMetrics(), - proxy.NopMetrics(), + consensus: consensus.NopMetrics(), + indexer: indexer.NopMetrics(), + mempool: mempool.NopMetrics(), + p2p: p2p.NopMetrics(), + proxy: proxy.NopMetrics(), + state: sm.NopMetrics(), + statesync: statesync.NopMetrics(), } } } @@ -1008,61 +747,6 @@ func loadStateFromDBOrGenesisDocProvider( return state, nil } -func createAndStartPrivValidatorSocketClient( - listenAddr, - chainID string, - logger log.Logger, -) (types.PrivValidator, error) { - - pve, err := privval.NewSignerListener(listenAddr, logger) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - pvsc, err := privval.NewSignerClient(pve, chainID) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - // try to 
get a pubkey from private validate first time - _, err = pvsc.GetPubKey(context.TODO()) - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) - } - - const ( - retries = 50 // 50 * 100ms = 5s total - timeout = 100 * time.Millisecond - ) - pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) - - return pvscWithRetries, nil -} - -func createAndStartPrivValidatorGRPCClient( - cfg *config.Config, - chainID string, - logger log.Logger, -) (types.PrivValidator, error) { - pvsc, err := tmgrpc.DialRemoteSigner( - cfg.PrivValidator, - chainID, - logger, - cfg.Instrumentation.Prometheus, - ) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - // try to get a pubkey from private validate first time - _, err = pvsc.GetPubKey(context.TODO()) - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) - } - - return pvsc, nil -} - func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions { opts := p2p.RouterOptions{ QueueType: conf.P2P.QueueType, @@ -1070,7 +754,7 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt if conf.FilterPeers && proxyApp != nil { opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ + res, err := proxyApp.Query().Query(ctx, abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/id/%s", id), }) if err != nil { @@ -1084,7 +768,7 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt } opts.FilterPeerByIP = func(ctx context.Context, ip net.IP, port uint16) error { - res, err := proxyApp.Query().QuerySync(ctx, abci.RequestQuery{ + res, err := proxyApp.Query().Query(ctx, abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/addr/%s", net.JoinHostPort(ip.String(), strconv.Itoa(int(port)))), }) if err != nil { diff --git a/node/node_test.go b/node/node_test.go index 6ffc4d96c9..29126b105c 
100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -7,10 +7,10 @@ import ( "math" "net" "os" - "syscall" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -21,10 +21,11 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/store" @@ -38,101 +39,107 @@ import ( ) func TestNodeStartStop(t *testing.T) { - cfg := config.ResetTestRoot("node_node_test") + cfg, err := config.ResetTestRoot("node_node_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) + ctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + logger := log.NewNopLogger() // create & start node - ns, err := newDefaultNode(cfg, log.TestingLogger()) + ns, err := newDefaultNode(ctx, cfg, logger) require.NoError(t, err) - require.NoError(t, ns.Start()) + n, ok := ns.(*nodeImpl) + require.True(t, ok) t.Cleanup(func() { - if ns.IsRunning() { - assert.NoError(t, ns.Stop()) - ns.Wait() + if n.IsRunning() { + bcancel() + n.Wait() } }) - n, ok := ns.(*nodeImpl) - require.True(t, ok) - - ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, n.Start(ctx)) + // wait for the node to produce a block + tctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() - // wait for the node to produce a block - blocksSub, err := n.EventBus().Subscribe(ctx, "node_test", 
types.EventQueryNewBlock) - require.NoError(t, err) - select { - case <-blocksSub.Out(): - case <-blocksSub.Canceled(): - t.Fatal("blocksSub was canceled") - case <-time.After(10 * time.Second): - t.Fatal("timed out waiting for the node to produce a block") - } + blocksSub, err := n.EventBus().SubscribeWithArgs(tctx, pubsub.SubscribeArgs{ + ClientID: "node_test", + Query: types.EventQueryNewBlock, + }) + require.NoError(t, err) + _, err = blocksSub.Next(tctx) + require.NoError(t, err, "waiting for event") - // stop the node - go func() { - err = n.Stop() - require.NoError(t, err) - }() + cancel() // stop the subscription context + bcancel() // stop the base context + n.Wait() - select { - case <-n.Quit(): - case <-time.After(5 * time.Second): - pid := os.Getpid() - p, err := os.FindProcess(pid) - if err != nil { - panic(err) - } - err = p.Signal(syscall.SIGABRT) - fmt.Println(err) - t.Fatal("timed out waiting for shutdown") - } + require.False(t, n.IsRunning(), "node must shut down") } -func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl { +func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl { t.Helper() - ns, err := newDefaultNode(conf, logger) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ns, err := newDefaultNode(ctx, conf, logger) require.NoError(t, err) n, ok := ns.(*nodeImpl) require.True(t, ok) t.Cleanup(func() { - if ns.IsRunning() { - assert.NoError(t, ns.Stop()) + cancel() + if n.IsRunning() { ns.Wait() } }) + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) return n } func TestNodeDelayedStart(t *testing.T) { - cfg := config.ResetTestRoot("node_delayed_start_test") + cfg, err := config.ResetTestRoot("node_delayed_start_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) now := tmtime.Now() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + // create & start node - n := getTestNode(t, cfg, 
log.TestingLogger()) + n := getTestNode(ctx, t, cfg, logger) n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) - require.NoError(t, n.Start()) + require.NoError(t, n.Start(ctx)) startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) } func TestNodeSetAppVersion(t *testing.T) { - cfg := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot("node_app_version_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + // create node - n := getTestNode(t, cfg, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, logger) // default config uses the kvstore app - var appVersion uint64 = kvstore.ProtocolVersion + appVersion := kvstore.ProtocolVersion // check version is set in state state, err := n.stateStore.Load() @@ -146,15 +153,19 @@ func TestNodeSetAppVersion(t *testing.T) { func TestNodeSetPrivValTCP(t *testing.T) { addr := "tcp://" + testFreeAddr(t) - cfg := config.ResetTestRoot("node_priv_val_tcp_test") + t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addr dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) - dialerEndpoint := privval.NewSignerDialerEndpoint( - log.TestingLogger(), - dialer, - ) + dialerEndpoint := privval.NewSignerDialerEndpoint(logger, dialer) privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) signerServer := privval.NewSignerServer( @@ -164,30 +175,40 @@ func TestNodeSetPrivValTCP(t *testing.T) { ) go func() { - err := signerServer.Start() - if err != nil { - panic(err) - } + err := signerServer.Start(ctx) + require.NoError(t, err) }() defer signerServer.Stop() 
//nolint:errcheck // ignore for tests - n := getTestNode(t, cfg, log.TestingLogger()) - assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) + genDoc, err := defaultGenesisDocProviderFunc(cfg)() + require.NoError(t, err) + + pval, err := createPrivval(ctx, logger, cfg, genDoc, nil) + require.NoError(t, err) + + assert.IsType(t, &privval.RetrySignerClient{}, pval) } // address without a protocol must result in error func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addrNoPrefix := testFreeAddr(t) - cfg := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix - n, err := newDefaultNode(cfg, log.TestingLogger()) + logger := log.NewNopLogger() + + n, err := newDefaultNode(ctx, cfg, logger) + assert.Error(t, err) if n != nil && n.IsRunning() { - assert.NoError(t, n.Stop()) + cancel() n.Wait() } } @@ -196,15 +217,19 @@ func TestNodeSetPrivValIPC(t *testing.T) { tmpfile := "/tmp/kms." 
+ tmrand.Str(6) + ".sock" defer os.Remove(tmpfile) // clean up - cfg := config.ResetTestRoot("node_priv_val_tcp_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = "unix://" + tmpfile + logger := log.NewNopLogger() + dialer := privval.DialUnixFn(tmpfile) - dialerEndpoint := privval.NewSignerDialerEndpoint( - log.TestingLogger(), - dialer, - ) + dialerEndpoint := privval.NewSignerDialerEndpoint(logger, dialer) + privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) pvsc := privval.NewSignerServer( @@ -214,12 +239,17 @@ func TestNodeSetPrivValIPC(t *testing.T) { ) go func() { - err := pvsc.Start() + err := pvsc.Start(ctx) require.NoError(t, err) }() defer pvsc.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, cfg, log.TestingLogger()) - assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) + genDoc, err := defaultGenesisDocProviderFunc(cfg)() + require.NoError(t, err) + + pval, err := createPrivval(ctx, logger, cfg, genDoc, nil) + require.NoError(t, err) + + assert.IsType(t, &privval.RetrySignerClient{}, pval) } // testFreeAddr claims a free port so we don't block on listener being ready. 
@@ -237,15 +267,16 @@ func TestCreateProposalBlock(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests - logger := log.TestingLogger() + logger := log.NewNopLogger() + + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) + require.NoError(t, err) const height int64 = 1 state, stateDB, privVals := state(t, 1, height) @@ -257,15 +288,12 @@ func TestCreateProposalBlock(t *testing.T) { state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) - mp := mempoolv0.NewCListMempool( + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // Make EvidencePool evidenceDB := dbm.NewMemDB() @@ -277,7 +305,8 @@ func TestCreateProposalBlock(t *testing.T) { // than can fit in a block var currentBytes int64 for currentBytes <= maxEvidenceBytes { - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time.Now(), privVals[0], "test-chain") + require.NoError(t, err) currentBytes += int64(len(ev.Bytes())) evidencePool.ReportConflictingVotes(ev.VoteA, ev.VoteB) } @@ -306,17 +335,18 @@ func TestCreateProposalBlock(t *testing.T) { ) commit := 
types.NewCommit(height-1, 0, types.BlockID{}, nil) - block, _ := blockExec.CreateProposalBlock( + block, _, err := blockExec.CreateProposalBlock( height, state, commit, proposerAddr, + nil, ) + require.NoError(t, err) // check that the part set does not exceed the maximum block size - partSet := block.MakePartSet(partSize) - // TODO(ismail): properly fix this test - // https://github.com/tendermint/tendermint/issues/77 - assert.Less(t, partSet.ByteSize(), int64(maxBytes)*2) + partSet, err := block.MakePartSet(partSize) + require.NoError(t, err) + assert.Less(t, partSet.ByteSize(), int64(maxBytes)) partSetFromHeader := types.NewPartSetFromHeader(partSet.Header()) for partSetFromHeader.Count() < partSetFromHeader.Total() { @@ -334,15 +364,17 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests - logger := log.TestingLogger() + logger := log.NewNopLogger() + + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) + require.NoError(t, err) const height int64 = 1 state, stateDB, _ := state(t, 1, height) @@ -354,15 +386,13 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( + + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - 
mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) @@ -380,11 +410,13 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { ) commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) - block, _ := blockExec.CreateProposalBlock( + block, _, err := blockExec.CreateProposalBlock( height, state, commit, proposerAddr, + nil, ) + require.NoError(t, err) pb, err := block.ToProto() require.NoError(t, err) @@ -393,7 +425,8 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { assert.Less(t, int64(pb.Size()), maxBytes*2) // check that the part set does not exceed the maximum block size - partSet := block.MakePartSet(partSize) + partSet, err := block.MakePartSet(partSize) + require.NoError(t, err) assert.EqualValues(t, partSet.ByteSize(), int64(pb.Size())) } @@ -401,15 +434,16 @@ func TestMaxProposalBlockSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg := config.ResetTestRoot("node_create_proposal") + cfg, err := config.ResetTestRoot("node_create_proposal") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests - logger := log.TestingLogger() + logger := log.NewNopLogger() + + cc := abciclient.NewLocalCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) + require.NoError(t, err) state, stateDB, _ := state(t, types.MaxVotesCount, int64(1)) stateStore := sm.NewStore(stateDB) @@ -419,20 +453,17 @@ func TestMaxProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( + mp := mempool.NewTxMempool( + logger.With("module", 
"mempool"), cfg.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), ) - mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) tx := tmrand.Bytes(txLength - 6 - 4) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) // now produce more txs than what a normal block can hold with 10 smaller txs // At the end of the test, only the single big tx should be added @@ -492,11 +523,13 @@ func TestMaxProposalBlockSize(t *testing.T) { commit.Signatures = append(commit.Signatures, cs) } - block, partSet := blockExec.CreateProposalBlock( + block, partSet, err := blockExec.CreateProposalBlock( math.MaxInt64, state, commit, proposerAddr, + nil, ) + require.NoError(t, err) // this ensures that the header is at max size block.Header.Time = timestamp @@ -516,48 +549,66 @@ func TestMaxProposalBlockSize(t *testing.T) { } func TestNodeNewSeedNode(t *testing.T) { - cfg := config.ResetTestRoot("node_new_node_custom_reactors_test") + cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test") + require.NoError(t, err) cfg.Mode = config.ModeSeed defer os.RemoveAll(cfg.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) require.NoError(t, err) - ns, err := makeSeedNode(cfg, + logger := log.NewNopLogger() + + ns, err := makeSeedNode(ctx, + cfg, config.DefaultDBProvider, nodeKey, defaultGenesisDocProviderFunc(cfg), - log.TestingLogger(), + logger, ) + t.Cleanup(ns.Wait) require.NoError(t, err) - n, ok := ns.(*nodeImpl) + n, ok := ns.(*seedNodeImpl) require.True(t, ok) - err = n.Start() + err = n.Start(ctx) require.NoError(t, err) 
assert.True(t, n.pexReactor.IsRunning()) - require.NoError(t, n.Stop()) + cancel() + n.Wait() + assert.False(t, n.pexReactor.IsRunning()) } func TestNodeSetEventSink(t *testing.T) { - cfg := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot("node_app_version_test") + require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) - logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink { - eventBus, err := createAndStartEventBus(logger) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, eventBus.Stop()) }) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + + t.Cleanup(eventBus.Wait) genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) - indexService, eventSinks, err := createAndStartIndexerService(cfg, - config.DefaultDBProvider, eventBus, logger, genDoc.ChainID) + indexService, eventSinks, err := createAndStartIndexerService(ctx, cfg, + config.DefaultDBProvider, eventBus, logger, genDoc.ChainID, + indexer.NopMetrics()) require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) + t.Cleanup(indexService.Wait) return eventSinks } cleanup := func(ns service.Service) func() { @@ -572,7 +623,7 @@ func TestNodeSetEventSink(t *testing.T) { if !n.IsRunning() { return } - assert.NoError(t, n.Stop()) + cancel() n.Wait() } } @@ -594,7 +645,7 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, indexer.NULL, eventSinks[0].Type()) cfg.TxIndex.Indexer = []string{"kvv"} - ns, err := newDefaultNode(cfg, logger) + ns, err := newDefaultNode(ctx, cfg, logger) assert.Nil(t, ns) assert.Contains(t, err.Error(), "unsupported event sink type") t.Cleanup(cleanup(ns)) @@ -606,56 +657,23 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, indexer.NULL, 
eventSinks[0].Type()) cfg.TxIndex.Indexer = []string{"psql"} - ns, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(ctx, cfg, logger) assert.Nil(t, ns) assert.Contains(t, err.Error(), "the psql connection settings cannot be empty") t.Cleanup(cleanup(ns)) - var psqlConn = "test" - - cfg.TxIndex.Indexer = []string{"psql"} - cfg.TxIndex.PsqlConn = psqlConn - eventSinks = setupTest(t, cfg) - - assert.Equal(t, 1, len(eventSinks)) - assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) - - cfg.TxIndex.Indexer = []string{"psql", "kv"} - cfg.TxIndex.PsqlConn = psqlConn - eventSinks = setupTest(t, cfg) - - assert.Equal(t, 2, len(eventSinks)) - // we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks. - if eventSinks[0].Type() == indexer.KV { - assert.Equal(t, indexer.PSQL, eventSinks[1].Type()) - } else { - assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) - assert.Equal(t, indexer.KV, eventSinks[1].Type()) - } - - cfg.TxIndex.Indexer = []string{"kv", "psql"} - cfg.TxIndex.PsqlConn = psqlConn - eventSinks = setupTest(t, cfg) - - assert.Equal(t, 2, len(eventSinks)) - if eventSinks[0].Type() == indexer.KV { - assert.Equal(t, indexer.PSQL, eventSinks[1].Type()) - } else { - assert.Equal(t, indexer.PSQL, eventSinks[0].Type()) - assert.Equal(t, indexer.KV, eventSinks[1].Type()) - } + // N.B. We can't create a PSQL event sink without starting a postgres + // instance for it to talk to. The indexer service tests exercise that case. 
var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") - cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"} - cfg.TxIndex.PsqlConn = psqlConn - ns, err = newDefaultNode(cfg, logger) + cfg.TxIndex.Indexer = []string{"null", "kv", "Kv"} + ns, err = newDefaultNode(ctx, cfg, logger) require.Error(t, err) assert.Contains(t, err.Error(), e.Error()) t.Cleanup(cleanup(ns)) - cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"} - cfg.TxIndex.PsqlConn = psqlConn - ns, err = newDefaultNode(cfg, logger) + cfg.TxIndex.Indexer = []string{"Null", "kV", "kv", "nUlL"} + ns, err = newDefaultNode(ctx, cfg, logger) require.Error(t, err) assert.Contains(t, err.Error(), e.Error()) t.Cleanup(cleanup(ns)) @@ -697,21 +715,26 @@ func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.Pri } func TestLoadStateFromGenesis(t *testing.T) { - _ = loadStatefromGenesis(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _ = loadStatefromGenesis(ctx, t) } -func loadStatefromGenesis(t *testing.T) sm.State { +func loadStatefromGenesis(ctx context.Context, t *testing.T) sm.State { t.Helper() stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - cfg := config.ResetTestRoot("load_state_from_genesis") + cfg, err := config.ResetTestRoot("load_state_from_genesis") + require.NoError(t, err) loadedState, err := stateStore.Load() require.NoError(t, err) require.True(t, loadedState.IsEmpty()) - genDoc, _ := factory.RandGenesisDoc(cfg, 0, false, 10) + valSet, _ := factory.ValidatorSet(ctx, t, 0, 10) + genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil) state, err := loadStateFromDBOrGenesisDocProvider( stateStore, diff --git a/node/public.go b/node/public.go index c616eebac5..0d6f1d93e0 100644 --- a/node/public.go +++ b/node/public.go @@ -2,6 +2,7 @@ package node import ( + "context" "fmt" abciclient "github.com/tendermint/tendermint/abci/client" @@ -16,8 +17,12 @@ import ( // process 
that host their own process-local tendermint node. This is // equivalent to running tendermint in it's own process communicating // to an external ABCI application. -func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) { - return newDefaultNode(conf, logger) +func NewDefault( + ctx context.Context, + conf *config.Config, + logger log.Logger, +) (service.Service, error) { + return newDefaultNode(ctx, conf, logger) } // New constructs a tendermint node. The ClientCreator makes it @@ -26,7 +31,9 @@ func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) // Genesis document: if the value is nil, the genesis document is read // from the file specified in the config, and otherwise the node uses // value of the final argument. -func New(conf *config.Config, +func New( + ctx context.Context, + conf *config.Config, logger log.Logger, cf abciclient.Creator, gen *types.GenesisDoc, @@ -51,7 +58,9 @@ func New(conf *config.Config, return nil, err } - return makeNode(conf, + return makeNode( + ctx, + conf, pval, nodeKey, cf, @@ -59,7 +68,7 @@ func New(conf *config.Config, config.DefaultDBProvider, logger) case config.ModeSeed: - return makeSeedNode(conf, config.DefaultDBProvider, nodeKey, genProvider, logger) + return makeSeedNode(ctx, conf, config.DefaultDBProvider, nodeKey, genProvider, logger) default: return nil, fmt.Errorf("%q is not a valid mode", conf.Mode) } diff --git a/node/seed.go b/node/seed.go new file mode 100644 index 0000000000..ef3e61df09 --- /dev/null +++ b/node/seed.go @@ -0,0 +1,171 @@ +package node + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/pex" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/libs/strings" 
+ tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/types" +) + +type seedNodeImpl struct { + service.BaseService + logger log.Logger + + // config + config *config.Config + genesisDoc *types.GenesisDoc // initial validator set + + // network + peerManager *p2p.PeerManager + router *p2p.Router + nodeInfo types.NodeInfo + nodeKey types.NodeKey // our node privkey + isListening bool + + // services + pexReactor service.Service // for exchanging peer addresses + shutdownOps closer +} + +// makeSeedNode returns a new seed node, containing only p2p, pex reactor +func makeSeedNode( + ctx context.Context, + cfg *config.Config, + dbProvider config.DBProvider, + nodeKey types.NodeKey, + genesisDocProvider genesisDocProvider, + logger log.Logger, +) (service.Service, error) { + if !cfg.P2P.PexReactor { + return nil, errors.New("cannot run seed nodes with PEX disabled") + } + + genDoc, err := genesisDocProvider() + if err != nil { + return nil, err + } + + state, err := sm.MakeGenesisState(genDoc) + if err != nil { + return nil, err + } + + nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) + if err != nil { + return nil, err + } + + // Setup Transport and Switch. 
+ p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) + + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + if err != nil { + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + closer) + } + + router, err := createRouter(ctx, logger, p2pMetrics, nodeInfo, nodeKey, + peerManager, cfg, nil) + if err != nil { + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + closer) + } + + pexReactor, err := pex.NewReactor(ctx, logger, peerManager, router.OpenChannel, peerManager.Subscribe(ctx)) + if err != nil { + return nil, combineCloseError(err, closer) + } + + node := &seedNodeImpl{ + config: cfg, + logger: logger, + genesisDoc: genDoc, + + nodeInfo: nodeInfo, + nodeKey: nodeKey, + peerManager: peerManager, + router: router, + + shutdownOps: closer, + + pexReactor: pexReactor, + } + node.BaseService = *service.NewBaseService(logger, "SeedNode", node) + + return node, nil +} + +// OnStart starts the Seed Node. It implements service.Service. +func (n *seedNodeImpl) OnStart(ctx context.Context) error { + if n.config.RPC.PprofListenAddress != "" { + rpcCtx, rpcCancel := context.WithCancel(ctx) + srv := &http.Server{Addr: n.config.RPC.PprofListenAddress, Handler: nil} + go func() { + select { + case <-ctx.Done(): + sctx, scancel := context.WithTimeout(context.Background(), time.Second) + defer scancel() + _ = srv.Shutdown(sctx) + case <-rpcCtx.Done(): + } + }() + + go func() { + n.logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress) + + if err := srv.ListenAndServe(); err != nil { + n.logger.Error("pprof server error", "err", err) + rpcCancel() + } + }() + } + + now := tmtime.Now() + genTime := n.genesisDoc.GenesisTime + if genTime.After(now) { + n.logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) + time.Sleep(genTime.Sub(now)) + } + + // Start the transport. 
+ if err := n.router.Start(ctx); err != nil { + return err + } + n.isListening = true + + if n.config.P2P.PexReactor { + if err := n.pexReactor.Start(ctx); err != nil { + return err + } + } + + return nil +} + +// OnStop stops the Seed Node. It implements service.Service. +func (n *seedNodeImpl) OnStop() { + n.logger.Info("Stopping Node") + + n.pexReactor.Wait() + n.router.Wait() + n.isListening = false + + if err := n.shutdownOps(); err != nil { + if strings.TrimSpace(err.Error()) != "" { + n.logger.Error("problem shutting down additional services", "err", err) + } + } +} diff --git a/node/setup.go b/node/setup.go index 40ced410cc..68371c45a7 100644 --- a/node/setup.go +++ b/node/setup.go @@ -2,6 +2,7 @@ package node import ( "bytes" + "context" "errors" "fmt" "strings" @@ -9,15 +10,13 @@ import ( dbm "github.com/tendermint/tm-db" - abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/p2p/pex" @@ -28,8 +27,11 @@ import ( "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" + tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" + "github.com/tendermint/tendermint/privval" + tmgrpc "github.com/tendermint/tendermint/privval/grpc" 
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" @@ -53,6 +55,10 @@ func makeCloser(cs []closer) closer { } } +func convertCancelCloser(cancel context.CancelFunc) closer { + return func() error { cancel(); return nil } +} + func combineCloseError(err error, cl closer) error { if err == nil { return cl() @@ -73,7 +79,7 @@ func initDBs( blockStoreDB, err := dbProvider(&config.DBContext{ID: "blockstore", Config: cfg}) if err != nil { - return nil, nil, func() error { return nil }, err + return nil, nil, func() error { return nil }, fmt.Errorf("unable to initialize blockstore: %w", err) } closers := []closer{} blockStore := store.NewBlockStore(blockStoreDB) @@ -81,7 +87,7 @@ func initDBs( stateDB, err := dbProvider(&config.DBContext{ID: "state", Config: cfg}) if err != nil { - return nil, nil, makeCloser(closers), err + return nil, nil, makeCloser(closers), fmt.Errorf("unable to initialize statestore: %w", err) } closers = append(closers, stateDB.Close) @@ -89,66 +95,35 @@ func initDBs( return blockStore, stateDB, makeCloser(closers), nil } -// nolint:lll -func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator, metrics) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return nil, fmt.Errorf("error starting proxy app connections: %v", err) - } - return proxyApp, nil -} - -func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { - return nil, err - } - return eventBus, nil -} - func createAndStartIndexerService( + ctx context.Context, cfg *config.Config, dbProvider config.DBProvider, - eventBus *types.EventBus, + eventBus *eventbus.EventBus, logger log.Logger, chainID string, + metrics *indexer.Metrics, ) 
(*indexer.Service, []indexer.EventSink, error) { eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID) if err != nil { return nil, nil, err } - indexerService := indexer.NewIndexerService(eventSinks, eventBus) - indexerService.SetLogger(logger.With("module", "txindex")) + indexerService := indexer.NewService(indexer.ServiceArgs{ + Sinks: eventSinks, + EventBus: eventBus, + Logger: logger.With("module", "txindex"), + Metrics: metrics, + }) - if err := indexerService.Start(); err != nil { + if err := indexerService.Start(ctx); err != nil { return nil, nil, err } return indexerService, eventSinks, nil } -func doHandshake( - stateStore sm.Store, - state sm.State, - blockStore sm.BlockStore, - genDoc *types.GenesisDoc, - eventBus types.BlockEventPublisher, - proxyApp proxy.AppConns, - consensusLogger log.Logger) error { - - handshaker := consensus.NewHandshaker(stateStore, state, blockStore, genDoc) - handshaker.SetLogger(consensusLogger) - handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) - } - return nil -} - -func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) { +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger, mode string) { // Log the version info. 
logger.Info("Version info", "tmVersion", version.TMVersion, @@ -164,17 +139,23 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL "state", state.Version.Consensus.Block, ) } - switch { - case mode == config.ModeFull: - consensusLogger.Info("This node is a fullnode") - case mode == config.ModeValidator: + + switch mode { + case config.ModeFull: + logger.Info("This node is a fullnode") + case config.ModeValidator: addr := pubKey.Address() // Log whether this node is a validator or an observer if state.Validators.HasAddress(addr) { - consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes()) + logger.Info("This node is a validator", + "addr", addr, + "pubKey", pubKey.Bytes(), + ) } else { - consensusLogger.Info("This node is a validator (NOT in the active validator set)", - "addr", addr, "pubKey", pubKey.Bytes()) + logger.Info("This node is a validator (NOT in the active validator set)", + "addr", addr, + "pubKey", pubKey.Bytes(), + ) } } } @@ -188,6 +169,7 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { } func createMempoolReactor( + ctx context.Context, cfg *config.Config, proxyApp proxy.AppConns, state sm.State, @@ -196,80 +178,40 @@ func createMempoolReactor( router *p2p.Router, logger log.Logger, ) (service.Service, mempool.Mempool, error) { + logger = logger.With("module", "mempool") - logger = logger.With("module", "mempool", "version", cfg.Mempool.Version) - peerUpdates := peerManager.Subscribe() - - switch cfg.Mempool.Version { - case config.MempoolV0: - ch, err := router.OpenChannel(mempoolv0.GetChannelDescriptor(cfg.Mempool)) - if err != nil { - return nil, nil, err - } - - mp := mempoolv0.NewCListMempool( - cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(memplMetrics), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), - ) - - mp.SetLogger(logger) - - reactor := mempoolv0.NewReactor( - 
logger, - cfg.Mempool, - peerManager, - mp, - ch, - peerUpdates, - ) - - if cfg.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactor, mp, nil - - case config.MempoolV1: - ch, err := router.OpenChannel(mempoolv1.GetChannelDescriptor(cfg.Mempool)) - if err != nil { - return nil, nil, err - } - - mp := mempoolv1.NewTxMempool( - logger, - cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv1.WithMetrics(memplMetrics), - mempoolv1.WithPreCheck(sm.TxPreCheck(state)), - mempoolv1.WithPostCheck(sm.TxPostCheck(state)), - ) - - reactor := mempoolv1.NewReactor( - logger, - cfg.Mempool, - peerManager, - mp, - ch, - peerUpdates, - ) - - if cfg.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } + mp := mempool.NewTxMempool( + logger, + cfg.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempool.WithMetrics(memplMetrics), + mempool.WithPreCheck(sm.TxPreCheck(state)), + mempool.WithPostCheck(sm.TxPostCheck(state)), + ) - return reactor, mp, nil + reactor, err := mempool.NewReactor( + ctx, + logger, + cfg.Mempool, + peerManager, + mp, + router.OpenChannel, + peerManager.Subscribe(ctx), + ) + if err != nil { + return nil, nil, err + } - default: - return nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version) + if cfg.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() } + + return reactor, mp, nil } func createEvidenceReactor( + ctx context.Context, cfg *config.Config, dbProvider config.DBProvider, stateDB dbm.DB, @@ -280,7 +222,7 @@ func createEvidenceReactor( ) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg}) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("unable to initialize evidence db: %w", err) } logger = logger.With("module", "evidence") @@ -290,55 +232,22 @@ func createEvidenceReactor( return nil, nil, fmt.Errorf("creating evidence pool: %w", err) } - ch, err := 
router.OpenChannel(evidence.GetChannelDescriptor()) - if err != nil { - return nil, nil, fmt.Errorf("creating evidence channel: %w", err) - } - - evidenceReactor := evidence.NewReactor( + evidenceReactor, err := evidence.NewReactor( + ctx, logger, - ch, - peerManager.Subscribe(), + router.OpenChannel, + peerManager.Subscribe(ctx), evidencePool, ) - - return evidenceReactor, evidencePool, nil -} - -func createBlockchainReactor( - logger log.Logger, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore *store.BlockStore, - csReactor *consensus.Reactor, - peerManager *p2p.PeerManager, - router *p2p.Router, - blockSync bool, - metrics *consensus.Metrics, -) (service.Service, error) { - - logger = logger.With("module", "blockchain") - - ch, err := router.OpenChannel(blocksync.GetChannelDescriptor()) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("creating evidence reactor: %w", err) } - peerUpdates := peerManager.Subscribe() - - reactor, err := blocksync.NewReactor( - logger, state.Copy(), blockExec, blockStore, csReactor, - ch, peerUpdates, blockSync, - metrics, - ) - if err != nil { - return nil, err - } - - return reactor, nil + return evidenceReactor, evidencePool, nil } func createConsensusReactor( + ctx context.Context, cfg *config.Config, state sm.State, blockExec *sm.BlockExecutor, @@ -348,13 +257,15 @@ func createConsensusReactor( privValidator types.PrivValidator, csMetrics *consensus.Metrics, waitSync bool, - eventBus *types.EventBus, + eventBus *eventbus.EventBus, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, ) (*consensus.Reactor, *consensus.State, error) { + logger = logger.With("module", "consensus") - consensusState := consensus.NewState( + consensusState := consensus.NewState(ctx, + logger, cfg.Consensus, state.Copy(), blockExec, @@ -363,36 +274,23 @@ func createConsensusReactor( evidencePool, consensus.StateMetrics(csMetrics), ) - consensusState.SetLogger(logger) - if privValidator != nil && cfg.Mode == 
config.ModeValidator { - consensusState.SetPrivValidator(privValidator) - } - csChDesc := consensus.GetChannelDescriptors() - channels := make(map[p2p.ChannelID]*p2p.Channel, len(csChDesc)) - for idx := range csChDesc { - chd := csChDesc[idx] - ch, err := router.OpenChannel(chd) - if err != nil { - return nil, nil, err - } - - channels[ch.ID] = ch + if privValidator != nil && cfg.Mode == config.ModeValidator { + consensusState.SetPrivValidator(ctx, privValidator) } - peerUpdates := peerManager.Subscribe() - - reactor := consensus.NewReactor( + reactor, err := consensus.NewReactor( + ctx, logger, consensusState, - channels[consensus.StateChannel], - channels[consensus.DataChannel], - channels[consensus.VoteChannel], - channels[consensus.VoteSetBitsChannel], - peerUpdates, + router.OpenChannel, + peerManager.Subscribe(ctx), waitSync, - consensus.ReactorMetrics(csMetrics), + csMetrics, ) + if err != nil { + return nil, nil, err + } // Services which will be publishing and/or subscribing for messages (events) // consensusReactor will set it on consensusState and blockExecutor. 
@@ -401,21 +299,22 @@ func createConsensusReactor( return reactor, consensusState, nil } -func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { - return p2p.NewMConnTransport( - logger, conn.DefaultMConnConfig(), []*p2p.ChannelDescriptor{}, - p2p.MConnTransportOptions{ - MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), - }, - ) -} - func createPeerManager( cfg *config.Config, dbProvider config.DBProvider, nodeID types.NodeID, ) (*p2p.PeerManager, closer, error) { + selfAddr, err := p2p.ParseNodeAddress(nodeID.AddressString(cfg.P2P.ExternalAddress)) + if err != nil { + return nil, func() error { return nil }, fmt.Errorf("couldn't parse ExternalAddress %q: %w", cfg.P2P.ExternalAddress, err) + } + + privatePeerIDs := make(map[types.NodeID]struct{}) + for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { + privatePeerIDs[types.NodeID(id)] = struct{}{} + } + var maxConns uint16 switch { @@ -425,12 +324,8 @@ func createPeerManager( maxConns = 64 } - privatePeerIDs := make(map[types.NodeID]struct{}) - for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { - privatePeerIDs[types.NodeID(id)] = struct{}{} - } - options := p2p.PeerManagerOptions{ + SelfAddress: selfAddr, MaxConnected: maxConns, MaxConnectedUpgrade: 4, MaxPeers: 1000, @@ -462,7 +357,7 @@ func createPeerManager( peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg}) if err != nil { - return nil, func() error { return nil }, err + return nil, func() error { return nil }, fmt.Errorf("unable to initialize peer store: %w", err) } peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options) @@ -480,41 +375,48 @@ func createPeerManager( } func createRouter( - p2pLogger log.Logger, + ctx context.Context, + logger log.Logger, p2pMetrics *p2p.Metrics, nodeInfo types.NodeInfo, - privKey crypto.PrivKey, + nodeKey types.NodeKey, peerManager *p2p.PeerManager, - transport p2p.Transport, - options 
p2p.RouterOptions, + cfg *config.Config, + proxyApp proxy.AppConns, ) (*p2p.Router, error) { + p2pLogger := logger.With("module", "p2p") + + transportConf := conn.DefaultMConnConfig() + transportConf.FlushThrottle = cfg.P2P.FlushThrottleTimeout + transportConf.SendRate = cfg.P2P.SendRate + transportConf.RecvRate = cfg.P2P.RecvRate + transportConf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize + transport := p2p.NewMConnTransport( + p2pLogger, transportConf, []*p2p.ChannelDescriptor{}, + p2p.MConnTransportOptions{ + MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), + }, + ) + + ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) + if err != nil { + return nil, err + } + return p2p.NewRouter( + ctx, p2pLogger, p2pMetrics, nodeInfo, - privKey, + nodeKey.PrivKey, peerManager, []p2p.Transport{transport}, - options, + []p2p.Endpoint{ep}, + getRouterConfig(cfg, proxyApp), ) } -func createPEXReactor( - logger log.Logger, - peerManager *p2p.PeerManager, - router *p2p.Router, -) (service.Service, error) { - - channel, err := router.OpenChannel(pex.ChannelDescriptor()) - if err != nil { - return nil, err - } - - peerUpdates := peerManager.Subscribe() - return pex.NewReactor(logger, peerManager, channel, peerUpdates), nil -} - func makeNodeInfo( cfg *config.Config, nodeKey types.NodeKey, @@ -522,14 +424,13 @@ func makeNodeInfo( genDoc *types.GenesisDoc, state sm.State, ) (types.NodeInfo, error) { + txIndexerStatus := "off" if indexer.IndexingEnabled(eventSinks) { txIndexerStatus = "on" } - bcChannel := byte(blocksync.BlockSyncChannel) - nodeInfo := types.NodeInfo{ ProtocolVersion: types.ProtocolVersion{ P2P: version.P2PProtocol, // global @@ -540,7 +441,7 @@ func makeNodeInfo( Network: genDoc.ChainID, Version: version.TMVersion, Channels: []byte{ - bcChannel, + byte(blocksync.BlockSyncChannel), byte(consensus.StateChannel), byte(consensus.DataChannel), byte(consensus.VoteChannel), @@ -563,16 +464,12 @@ func makeNodeInfo( 
nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := cfg.P2P.ExternalAddress - - if lAddr == "" { - lAddr = cfg.P2P.ListenAddress + nodeInfo.ListenAddr = cfg.P2P.ExternalAddress + if nodeInfo.ListenAddr == "" { + nodeInfo.ListenAddr = cfg.P2P.ListenAddress } - nodeInfo.ListenAddr = lAddr - - err := nodeInfo.Validate() - return nodeInfo, err + return nodeInfo, nodeInfo.Validate() } func makeSeedNodeInfo( @@ -602,14 +499,110 @@ func makeSeedNodeInfo( nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := cfg.P2P.ExternalAddress + nodeInfo.ListenAddr = cfg.P2P.ExternalAddress + if nodeInfo.ListenAddr == "" { + nodeInfo.ListenAddr = cfg.P2P.ListenAddress + } + + return nodeInfo, nodeInfo.Validate() +} + +func createAndStartPrivValidatorSocketClient( + ctx context.Context, + listenAddr, chainID string, + logger log.Logger, +) (types.PrivValidator, error) { + + pve, err := privval.NewSignerListener(listenAddr, logger) + if err != nil { + return nil, fmt.Errorf("starting validator listener: %w", err) + } + + pvsc, err := privval.NewSignerClient(ctx, pve, chainID) + if err != nil { + return nil, fmt.Errorf("starting validator client: %w", err) + } - if lAddr == "" { - lAddr = cfg.P2P.ListenAddress + // try to get a pubkey from private validate first time + _, err = pvsc.GetPubKey(ctx) + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) } - nodeInfo.ListenAddr = lAddr + const ( + timeout = 100 * time.Millisecond + maxTime = 5 * time.Second + retries = int(maxTime / timeout) + ) + pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) + + return pvscWithRetries, nil +} + +func createAndStartPrivValidatorGRPCClient( + ctx context.Context, + cfg *config.Config, + chainID string, + logger log.Logger, +) (types.PrivValidator, error) { + pvsc, err := tmgrpc.DialRemoteSigner( + ctx, + cfg.PrivValidator, + chainID, + logger, + cfg.Instrumentation.Prometheus, + ) + if err != nil { + return nil, 
fmt.Errorf("failed to start private validator: %w", err) + } + + // try to get a pubkey from private validate first time + _, err = pvsc.GetPubKey(ctx) + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + + return pvsc, nil +} + +func makeDefaultPrivval(conf *config.Config) (*privval.FilePV, error) { + if conf.Mode == config.ModeValidator { + pval, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + if err != nil { + return nil, err + } + return pval, nil + } + + return nil, nil +} + +func createPrivval(ctx context.Context, logger log.Logger, conf *config.Config, genDoc *types.GenesisDoc, defaultPV *privval.FilePV) (types.PrivValidator, error) { + if conf.PrivValidator.ListenAddr != "" { + protocol, _ := tmnet.ProtocolAndAddress(conf.PrivValidator.ListenAddr) + // FIXME: we should return un-started services and + // then start them later. + switch protocol { + case "grpc": + privValidator, err := createAndStartPrivValidatorGRPCClient(ctx, conf, genDoc.ChainID, logger) + if err != nil { + return nil, fmt.Errorf("error with private validator grpc client: %w", err) + } + return privValidator, nil + default: + privValidator, err := createAndStartPrivValidatorSocketClient( + ctx, + conf.PrivValidator.ListenAddr, + genDoc.ChainID, + logger, + ) + if err != nil { + return nil, fmt.Errorf("error with private validator socket client: %w", err) + + } + return privValidator, nil + } + } - err := nodeInfo.Validate() - return nodeInfo, err + return defaultPV, nil } diff --git a/privval/file.go b/privval/file.go index 4ec918c704..728e0dc674 100644 --- a/privval/file.go +++ b/privval/file.go @@ -3,9 +3,10 @@ package privval import ( "bytes" "context" + "encoding/json" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -13,10 +14,10 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" 
+ "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/internal/libs/protoio" "github.com/tendermint/tendermint/internal/libs/tempfile" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson "github.com/tendermint/tendermint/libs/json" tmos "github.com/tendermint/tendermint/libs/os" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -32,14 +33,14 @@ const ( ) // A vote is either stepPrevote or stepPrecommit. -func voteToStep(vote *tmproto.Vote) int8 { +func voteToStep(vote *tmproto.Vote) (int8, error) { switch vote.Type { case tmproto.PrevoteType: - return stepPrevote + return stepPrevote, nil case tmproto.PrecommitType: - return stepPrecommit + return stepPrecommit, nil default: - panic(fmt.Sprintf("Unknown vote type: %v", vote.Type)) + return 0, fmt.Errorf("unknown vote type: %v", vote.Type) } } @@ -47,36 +48,67 @@ func voteToStep(vote *tmproto.Vote) int8 { // FilePVKey stores the immutable part of PrivValidator. 
type FilePVKey struct { - Address types.Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - PrivKey crypto.PrivKey `json:"priv_key"` + Address types.Address + PubKey crypto.PubKey + PrivKey crypto.PrivKey filePath string } +type filePVKeyJSON struct { + Address types.Address `json:"address"` + PubKey json.RawMessage `json:"pub_key"` + PrivKey json.RawMessage `json:"priv_key"` +} + +func (pvKey FilePVKey) MarshalJSON() ([]byte, error) { + pubk, err := jsontypes.Marshal(pvKey.PubKey) + if err != nil { + return nil, err + } + privk, err := jsontypes.Marshal(pvKey.PrivKey) + if err != nil { + return nil, err + } + return json.Marshal(filePVKeyJSON{ + Address: pvKey.Address, PubKey: pubk, PrivKey: privk, + }) +} + +func (pvKey *FilePVKey) UnmarshalJSON(data []byte) error { + var key filePVKeyJSON + if err := json.Unmarshal(data, &key); err != nil { + return err + } + if err := jsontypes.Unmarshal(key.PubKey, &pvKey.PubKey); err != nil { + return fmt.Errorf("decoding PubKey: %w", err) + } + if err := jsontypes.Unmarshal(key.PrivKey, &pvKey.PrivKey); err != nil { + return fmt.Errorf("decoding PrivKey: %w", err) + } + pvKey.Address = key.Address + return nil +} + // Save persists the FilePVKey to its filePath. -func (pvKey FilePVKey) Save() { +func (pvKey FilePVKey) Save() error { outFile := pvKey.filePath if outFile == "" { - panic("cannot save PrivValidator key: filePath not set") + return errors.New("cannot save PrivValidator key: filePath not set") } - jsonBytes, err := tmjson.MarshalIndent(pvKey, "", " ") + data, err := json.MarshalIndent(pvKey, "", " ") if err != nil { - panic(err) - } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) - if err != nil { - panic(err) + return err } - + return tempfile.WriteFileAtomic(outFile, data, 0600) } //------------------------------------------------------------------------------- // FilePVLastSignState stores the mutable part of PrivValidator. 
type FilePVLastSignState struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step int8 `json:"step"` Signature []byte `json:"signature,omitempty"` @@ -85,14 +117,14 @@ type FilePVLastSignState struct { filePath string } -// CheckHRS checks the given height, round, step (HRS) against that of the +// checkHRS checks the given height, round, step (HRS) against that of the // FilePVLastSignState. It returns an error if the arguments constitute a regression, // or if they match but the SignBytes are empty. // The returned boolean indicates whether the last Signature should be reused - // it returns true if the HRS matches the arguments and the SignBytes are not empty (indicating // we have already signed for this HRS, and can reuse the existing signature). // It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. -func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { +func (lss *FilePVLastSignState) checkHRS(height int64, round int32, step int8) (bool, error) { if lss.Height > height { return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) @@ -127,19 +159,16 @@ func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) ( } // Save persists the FilePvLastSignState to its filePath. 
-func (lss *FilePVLastSignState) Save() { +func (lss *FilePVLastSignState) Save() error { outFile := lss.filePath if outFile == "" { - panic("cannot save FilePVLastSignState: filePath not set") + return errors.New("cannot save FilePVLastSignState: filePath not set") } - jsonBytes, err := tmjson.MarshalIndent(lss, "", " ") + jsonBytes, err := json.MarshalIndent(lss, "", " ") if err != nil { - panic(err) - } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) - if err != nil { - panic(err) + return err } + return tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) } //------------------------------------------------------------------------------- @@ -200,12 +229,12 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) (*FilePV, error) { // If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState. func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, error) { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } pvKey := FilePVKey{} - err = tmjson.Unmarshal(keyJSONBytes, &pvKey) + err = json.Unmarshal(keyJSONBytes, &pvKey) if err != nil { return nil, fmt.Errorf("error reading PrivValidator key from %v: %w", keyFilePath, err) } @@ -218,11 +247,11 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, err pvState := FilePVLastSignState{} if loadState { - stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + stateJSONBytes, err := os.ReadFile(stateFilePath) if err != nil { return nil, err } - err = tmjson.Unmarshal(stateJSONBytes, &pvState) + err = json.Unmarshal(stateJSONBytes, &pvState) if err != nil { return nil, fmt.Errorf("error reading PrivValidator state from %v: %w", stateFilePath, err) } @@ -239,17 +268,23 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, err // LoadOrGenFilePV loads a FilePV from the given filePaths // or else generates a new one and 
saves it to the filePaths. func LoadOrGenFilePV(keyFilePath, stateFilePath string) (*FilePV, error) { - var ( - pv *FilePV - err error - ) if tmos.FileExists(keyFilePath) { - pv, err = LoadFilePV(keyFilePath, stateFilePath) - } else { - pv, err = GenFilePV(keyFilePath, stateFilePath, "") - pv.Save() + pv, err := LoadFilePV(keyFilePath, stateFilePath) + if err != nil { + return nil, err + } + return pv, nil + } + pv, err := GenFilePV(keyFilePath, stateFilePath, "") + if err != nil { + return nil, err } - return pv, err + + if err := pv.Save(); err != nil { + return nil, err + } + + return pv, nil } // GetAddress returns the address of the validator. @@ -268,7 +303,7 @@ func (pv *FilePV) GetPubKey(ctx context.Context) (crypto.PubKey, error) { // chainID. Implements PrivValidator. func (pv *FilePV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { if err := pv.signVote(chainID, vote); err != nil { - return fmt.Errorf("error signing vote: %v", err) + return fmt.Errorf("error signing vote: %w", err) } return nil } @@ -277,27 +312,29 @@ func (pv *FilePV) SignVote(ctx context.Context, chainID string, vote *tmproto.Vo // the chainID. Implements PrivValidator. func (pv *FilePV) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { if err := pv.signProposal(chainID, proposal); err != nil { - return fmt.Errorf("error signing proposal: %v", err) + return fmt.Errorf("error signing proposal: %w", err) } return nil } // Save persists the FilePV to disk. -func (pv *FilePV) Save() { - pv.Key.Save() - pv.LastSignState.Save() +func (pv *FilePV) Save() error { + if err := pv.Key.Save(); err != nil { + return err + } + return pv.LastSignState.Save() } // Reset resets all fields in the FilePV. // NOTE: Unsafe! 
-func (pv *FilePV) Reset() { +func (pv *FilePV) Reset() error { var sig []byte pv.LastSignState.Height = 0 pv.LastSignState.Round = 0 pv.LastSignState.Step = 0 pv.LastSignState.Signature = sig pv.LastSignState.SignBytes = nil - pv.Save() + return pv.Save() } // String returns a string representation of the FilePV. @@ -317,11 +354,16 @@ func (pv *FilePV) String() string { // It may need to set the timestamp as well if the vote is otherwise the same as // a previously signed vote (ie. we crashed after signing but before the vote hit the WAL). func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { - height, round, step := vote.Height, vote.Round, voteToStep(vote) + step, err := voteToStep(vote) + if err != nil { + return err + } + height := vote.Height + round := vote.Round lss := pv.LastSignState - sameHRS, err := lss.CheckHRS(height, round, step) + sameHRS, err := lss.checkHRS(height, round, step) if err != nil { return err } @@ -336,13 +378,19 @@ func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { if sameHRS { if bytes.Equal(signBytes, lss.SignBytes) { vote.Signature = lss.Signature - } else if timestamp, ok := checkVotesOnlyDifferByTimestamp(lss.SignBytes, signBytes); ok { + } else { + timestamp, ok, err := checkVotesOnlyDifferByTimestamp(lss.SignBytes, signBytes) + if err != nil { + return err + } + if !ok { + return errors.New("conflicting data") + } + vote.Timestamp = timestamp vote.Signature = lss.Signature - } else { - err = fmt.Errorf("conflicting data") } - return err + return nil } // It passed the checks. Sign the vote @@ -350,20 +398,20 @@ func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { if err != nil { return err } - pv.saveSigned(height, round, step, signBytes, sig) + if err := pv.saveSigned(height, round, step, signBytes, sig); err != nil { + return err + } vote.Signature = sig return nil } // signProposal checks if the proposal is good to sign and sets the proposal signature. 
-// It may need to set the timestamp as well if the proposal is otherwise the same as -// a previously signed proposal ie. we crashed after signing but before the proposal hit the WAL). func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error { height, round, step := proposal.Height, proposal.Round, stepPropose lss := pv.LastSignState - sameHRS, err := lss.CheckHRS(height, round, step) + sameHRS, err := lss.checkHRS(height, round, step) if err != nil { return err } @@ -373,18 +421,12 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. // If signbytes are the same, use the last signature. - // If they only differ by timestamp, use last timestamp and signature - // Otherwise, return error if sameHRS { - if bytes.Equal(signBytes, lss.SignBytes) { - proposal.Signature = lss.Signature - } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(lss.SignBytes, signBytes); ok { - proposal.Timestamp = timestamp - proposal.Signature = lss.Signature - } else { - err = fmt.Errorf("conflicting data") + if !bytes.Equal(signBytes, lss.SignBytes) { + return errors.New("conflicting data") } - return err + proposal.Signature = lss.Signature + return nil } // It passed the checks. 
Sign the proposal @@ -392,34 +434,34 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error if err != nil { return err } - pv.saveSigned(height, round, step, signBytes, sig) + if err := pv.saveSigned(height, round, step, signBytes, sig); err != nil { + return err + } proposal.Signature = sig return nil } // Persist height/round/step and signature -func (pv *FilePV) saveSigned(height int64, round int32, step int8, - signBytes []byte, sig []byte) { - +func (pv *FilePV) saveSigned(height int64, round int32, step int8, signBytes []byte, sig []byte) error { pv.LastSignState.Height = height pv.LastSignState.Round = round pv.LastSignState.Step = step pv.LastSignState.Signature = sig pv.LastSignState.SignBytes = signBytes - pv.LastSignState.Save() + return pv.LastSignState.Save() } //----------------------------------------------------------------------------------------- // returns the timestamp from the lastSignBytes. // returns true if the only difference in the votes is their timestamp. 
-func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { +func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool, error) { var lastVote, newVote tmproto.CanonicalVote if err := protoio.UnmarshalDelimited(lastSignBytes, &lastVote); err != nil { - panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) + return time.Time{}, false, fmt.Errorf("LastSignBytes cannot be unmarshalled into vote: %w", err) } if err := protoio.UnmarshalDelimited(newSignBytes, &newVote); err != nil { - panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) + return time.Time{}, false, fmt.Errorf("signBytes cannot be unmarshalled into vote: %w", err) } lastTime := lastVote.Timestamp @@ -428,25 +470,5 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T lastVote.Timestamp = now newVote.Timestamp = now - return lastTime, proto.Equal(&newVote, &lastVote) -} - -// returns the timestamp from the lastSignBytes. 
-// returns true if the only difference in the proposals is their timestamp -func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal tmproto.CanonicalProposal - if err := protoio.UnmarshalDelimited(lastSignBytes, &lastProposal); err != nil { - panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) - } - if err := protoio.UnmarshalDelimited(newSignBytes, &newProposal); err != nil { - panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) - } - - lastTime := lastProposal.Timestamp - // set the times to the same value and check equality - now := tmtime.Now() - lastProposal.Timestamp = now - newProposal.Timestamp = now - - return lastTime, proto.Equal(&newProposal, &lastProposal) + return lastTime, proto.Equal(&newVote, &lastVote), nil } diff --git a/privval/file_test.go b/privval/file_test.go index 680428ac2c..9e0c3d691a 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -3,8 +3,8 @@ package privval import ( "context" "encoding/base64" + "encoding/json" "fmt" - "io/ioutil" "os" "testing" "time" @@ -14,7 +14,6 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" - tmjson "github.com/tendermint/tendermint/libs/json" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -22,32 +21,33 @@ import ( ) func TestGenLoadValidator(t *testing.T) { - assert := assert.New(t) - - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), 
tempStateFile.Name(), "") require.NoError(t, err) height := int64(100) privVal.LastSignState.Height = height - privVal.Save() + require.NoError(t, privVal.Save()) addr := privVal.GetAddress() privVal, err = LoadFilePV(tempKeyFile.Name(), tempStateFile.Name()) - assert.NoError(err) - assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") - assert.Equal(height, privVal.LastSignState.Height, "expected privval.LastHeight to have been saved") + assert.NoError(t, err) + assert.Equal(t, addr, privVal.GetAddress(), "expected privval addr to be the same") + assert.Equal(t, height, privVal.LastSignState.Height, "expected privval.LastHeight to have been saved") } func TestResetValidator(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") require.NoError(t, err) @@ -62,24 +62,22 @@ func TestResetValidator(t *testing.T) { randBytes := tmrand.Bytes(tmhash.Size) blockID := types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) - err = privVal.SignVote(context.Background(), "mychainid", vote.ToProto()) + err = privVal.SignVote(ctx, "mychainid", vote.ToProto()) assert.NoError(t, err, "expected no error signing vote") // priv val after signing is not same as empty assert.NotEqual(t, privVal.LastSignState, emptyState) // priv val after AcceptNewConnection is same as empty - privVal.Reset() + require.NoError(t, privVal.Reset()) assert.Equal(t, privVal.LastSignState, emptyState) } func TestLoadOrGenValidator(t 
*testing.T) { - assert := assert.New(t) - - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + require.NoError(t, err) tempKeyFilePath := tempKeyFile.Name() if err := os.Remove(tempKeyFilePath); err != nil { @@ -95,12 +93,10 @@ func TestLoadOrGenValidator(t *testing.T) { addr := privVal.GetAddress() privVal, err = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) require.NoError(t, err) - assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") + assert.Equal(t, addr, privVal.GetAddress(), "expected privval addr to be the same") } func TestUnmarshalValidatorState(t *testing.T) { - assert, require := assert.New(t), require.New(t) - // create some fixed values serialized := `{ "height": "1", @@ -109,23 +105,21 @@ func TestUnmarshalValidatorState(t *testing.T) { }` val := FilePVLastSignState{} - err := tmjson.Unmarshal([]byte(serialized), &val) - require.Nil(err, "%+v", err) + err := json.Unmarshal([]byte(serialized), &val) + require.NoError(t, err) // make sure the values match - assert.EqualValues(val.Height, 1) - assert.EqualValues(val.Round, 1) - assert.EqualValues(val.Step, 1) + assert.EqualValues(t, val.Height, 1) + assert.EqualValues(t, val.Round, 1) + assert.EqualValues(t, val.Step, 1) // export it and make sure it is the same - out, err := tmjson.Marshal(val) - require.Nil(err, "%+v", err) - assert.JSONEq(serialized, string(out)) + out, err := json.Marshal(val) + require.NoError(t, err) + assert.JSONEq(t, serialized, string(out)) } func TestUnmarshalValidatorKey(t *testing.T) { - assert, require := assert.New(t), require.New(t) - // create some fixed values privKey := ed25519.GenPrivKey() pubKey := privKey.PubKey() @@ -148,27 +142,28 @@ func 
TestUnmarshalValidatorKey(t *testing.T) { }`, addr, pubB64, privB64) val := FilePVKey{} - err := tmjson.Unmarshal([]byte(serialized), &val) - require.Nil(err, "%+v", err) + err := json.Unmarshal([]byte(serialized), &val) + require.NoError(t, err) // make sure the values match - assert.EqualValues(addr, val.Address) - assert.EqualValues(pubKey, val.PubKey) - assert.EqualValues(privKey, val.PrivKey) + assert.EqualValues(t, addr, val.Address) + assert.EqualValues(t, pubKey, val.PubKey) + assert.EqualValues(t, privKey, val.PrivKey) // export it and make sure it is the same - out, err := tmjson.Marshal(val) - require.Nil(err, "%+v", err) - assert.JSONEq(serialized, string(out)) + out, err := json.Marshal(val) + require.NoError(t, err) + assert.JSONEq(t, serialized, string(out)) } func TestSignVote(t *testing.T) { - assert := assert.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") require.NoError(t, err) @@ -187,12 +182,13 @@ func TestSignVote(t *testing.T) { // sign a vote for first time vote := newVote(privVal.Key.Address, 0, height, round, voteType, block1) v := vote.ToProto() - err = privVal.SignVote(context.Background(), "mychainid", v) - assert.NoError(err, "expected no error signing vote") + + err = privVal.SignVote(ctx, "mychainid", v) + assert.NoError(t, err, "expected no error signing vote") // try to sign the same vote again; should be fine - err = privVal.SignVote(context.Background(), "mychainid", v) - assert.NoError(err, "expected no error on signing same vote") + err = privVal.SignVote(ctx, 
"mychainid", v) + assert.NoError(t, err, "expected no error on signing same vote") // now try some bad votes cases := []*types.Vote{ @@ -203,26 +199,26 @@ func TestSignVote(t *testing.T) { } for _, c := range cases { - cpb := c.ToProto() - err = privVal.SignVote(context.Background(), "mychainid", cpb) - assert.Error(err, "expected error on signing conflicting vote") + assert.Error(t, privVal.SignVote(ctx, "mychainid", c.ToProto()), + "expected error on signing conflicting vote") } // try signing a vote with a different time stamp sig := vote.Signature vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) - err = privVal.SignVote(context.Background(), "mychainid", v) - assert.NoError(err) - assert.Equal(sig, vote.Signature) + err = privVal.SignVote(ctx, "mychainid", v) + assert.NoError(t, err) + assert.Equal(t, sig, vote.Signature) } func TestSignProposal(t *testing.T) { - assert := assert.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", "priv_validator_state_") + require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") require.NoError(t, err) @@ -237,80 +233,55 @@ func TestSignProposal(t *testing.T) { height, round := int64(10), int32(1) // sign a proposal for first time - proposal := newProposal(height, round, block1) + ts := tmtime.Now() + proposal := newProposal(height, round, block1, ts) pbp := proposal.ToProto() - err = privVal.SignProposal(context.Background(), "mychainid", pbp) - assert.NoError(err, "expected no error signing proposal") + + err = privVal.SignProposal(ctx, "mychainid", pbp) + assert.NoError(t, err, "expected no error signing proposal") // try to sign the same 
proposal again; should be fine - err = privVal.SignProposal(context.Background(), "mychainid", pbp) - assert.NoError(err, "expected no error on signing same proposal") + err = privVal.SignProposal(ctx, "mychainid", pbp) + assert.NoError(t, err, "expected no error on signing same proposal") // now try some bad Proposals cases := []*types.Proposal{ - newProposal(height, round-1, block1), // round regression - newProposal(height-1, round, block1), // height regression - newProposal(height-2, round+4, block1), // height regression and different round - newProposal(height, round, block2), // different block + newProposal(height, round-1, block1, ts), // round regression + newProposal(height-1, round, block1, ts), // height regression + newProposal(height-2, round+4, block1, ts), // height regression and different round + newProposal(height, round, block2, ts), // different block + newProposal(height, round, block1, ts.Add(time.Second)), // different timestamp } for _, c := range cases { - err = privVal.SignProposal(context.Background(), "mychainid", c.ToProto()) - assert.Error(err, "expected error on signing conflicting proposal") + assert.Errorf(t, privVal.SignProposal(ctx, "mychainid", c.ToProto()), + "expected error on signing conflicting proposal") } - - // try signing a proposal with a different time stamp - sig := proposal.Signature - proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) - err = privVal.SignProposal(context.Background(), "mychainid", pbp) - assert.NoError(err) - assert.Equal(sig, proposal.Signature) } func TestDifferByTimestamp(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempKeyFile, err := os.CreateTemp("", "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp("", 
"priv_validator_state_") + require.NoError(t, err) privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") require.NoError(t, err) randbytes := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} height, round := int64(10), int32(1) chainID := "mychainid" - // test proposal - { - proposal := newProposal(height, round, block1) - pb := proposal.ToProto() - err := privVal.SignProposal(context.Background(), chainID, pb) - assert.NoError(t, err, "expected no error signing proposal") - signBytes := types.ProposalSignBytes(chainID, pb) - - sig := proposal.Signature - timeStamp := proposal.Timestamp - - // manipulate the timestamp. should get changed back - pb.Timestamp = pb.Timestamp.Add(time.Millisecond) - var emptySig []byte - proposal.Signature = emptySig - err = privVal.SignProposal(context.Background(), "mychainid", pb) - assert.NoError(t, err, "expected no error on signing same proposal") - - assert.Equal(t, timeStamp, pb.Timestamp) - assert.Equal(t, signBytes, types.ProposalSignBytes(chainID, pb)) - assert.Equal(t, sig, proposal.Signature) - } - // test vote { voteType := tmproto.PrevoteType blockID := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) v := vote.ToProto() - err := privVal.SignVote(context.Background(), "mychainid", v) - assert.NoError(t, err, "expected no error signing vote") + err := privVal.SignVote(ctx, "mychainid", v) + require.NoError(t, err, "expected no error signing vote") signBytes := types.VoteSignBytes(chainID, v) sig := v.Signature @@ -320,8 +291,8 @@ func TestDifferByTimestamp(t *testing.T) { v.Timestamp = v.Timestamp.Add(time.Millisecond) var emptySig []byte v.Signature = emptySig - err = privVal.SignVote(context.Background(), "mychainid", v) - assert.NoError(t, err, "expected no error on signing same vote") + err = privVal.SignVote(ctx, "mychainid", 
v) + require.NoError(t, err, "expected no error on signing same vote") assert.Equal(t, timeStamp, v.Timestamp) assert.Equal(t, signBytes, types.VoteSignBytes(chainID, v)) @@ -342,11 +313,11 @@ func newVote(addr types.Address, idx int32, height int64, round int32, } } -func newProposal(height int64, round int32, blockID types.BlockID) *types.Proposal { +func newProposal(height int64, round int32, blockID types.BlockID, t time.Time) *types.Proposal { return &types.Proposal{ Height: height, Round: round, BlockID: blockID, - Timestamp: tmtime.Now(), + Timestamp: t, } } diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index 98730df191..86bf4bb2b4 100644 --- a/privval/grpc/client_test.go +++ b/privval/grpc/client_test.go @@ -6,7 +6,8 @@ import ( "testing" "time" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/test/bufconn" "github.com/stretchr/testify/assert" @@ -23,7 +24,7 @@ import ( const chainID = "chain-id" -func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(context.Context, string) (net.Conn, error)) { +func dialer(t *testing.T, pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(context.Context, string) (net.Conn, error)) { listener := bufconn.Listen(1024 * 1024) server := grpc.NewServer() @@ -32,11 +33,7 @@ func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(conte privvalproto.RegisterPrivValidatorAPIServer(server, s) - go func() { - if err := server.Serve(listener); err != nil { - panic(err) - } - }() + go func() { require.NoError(t, server.Serve(listener)) }() return server, func(context.Context, string) (net.Conn, error) { return listener.Dial() @@ -45,38 +42,43 @@ func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(conte func TestSignerClient_GetPubKey(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + 
mockPV := types.NewMockPV() - logger := log.TestingLogger() - srv, dialer := dialer(mockPV, logger) + logger := log.NewTestingLogger(t) + srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer)) - if err != nil { - panic(err) - } + conn, err := grpc.DialContext(ctx, "", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) + require.NoError(t, err) defer conn.Close() client, err := tmgrpc.NewSignerClient(conn, chainID, logger) require.NoError(t, err) - pk, err := client.GetPubKey(context.Background()) + pk, err := client.GetPubKey(ctx) require.NoError(t, err) assert.Equal(t, mockPV.PrivKey.PubKey(), pk) } func TestSignerClient_SignVote(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() mockPV := types.NewMockPV() - logger := log.TestingLogger() - srv, dialer := dialer(mockPV, logger) + logger := log.NewTestingLogger(t) + srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer)) - if err != nil { - panic(err) - } + conn, err := grpc.DialContext(ctx, "", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) + require.NoError(t, err) defer conn.Close() client, err := tmgrpc.NewSignerClient(conn, chainID, logger) @@ -108,28 +110,30 @@ func TestSignerClient_SignVote(t *testing.T) { pbHave := have.ToProto() - err = client.SignVote(context.Background(), chainID, pbHave) + err = client.SignVote(ctx, chainID, pbHave) require.NoError(t, err) pbWant := want.ToProto() - require.NoError(t, mockPV.SignVote(context.Background(), chainID, pbWant)) + require.NoError(t, mockPV.SignVote(ctx, chainID, pbWant)) assert.Equal(t, pbWant.Signature, pbHave.Signature) } func TestSignerClient_SignProposal(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() mockPV := types.NewMockPV() - logger := log.TestingLogger() - srv, dialer := dialer(mockPV, logger) + logger := log.NewTestingLogger(t) + srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer)) - if err != nil { - panic(err) - } + conn, err := grpc.DialContext(ctx, "", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(dialer), + ) + require.NoError(t, err) defer conn.Close() client, err := tmgrpc.NewSignerClient(conn, chainID, logger) @@ -157,12 +161,12 @@ func TestSignerClient_SignProposal(t *testing.T) { pbHave := have.ToProto() - err = client.SignProposal(context.Background(), chainID, pbHave) + err = client.SignProposal(ctx, chainID, pbHave) require.NoError(t, err) pbWant := want.ToProto() - require.NoError(t, mockPV.SignProposal(context.Background(), chainID, pbWant)) + require.NoError(t, mockPV.SignProposal(ctx, chainID, pbWant)) assert.Equal(t, pbWant.Signature, pbHave.Signature) } diff --git a/privval/grpc/server_test.go b/privval/grpc/server_test.go index 9fec9f2fd6..78e9c79edd 100644 --- a/privval/grpc/server_test.go +++ b/privval/grpc/server_test.go @@ -34,14 +34,18 @@ func TestGetPubKey(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - s := tmgrpc.NewSignerServer(ChainID, tc.pv, log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + s := tmgrpc.NewSignerServer(ChainID, tc.pv, logger) req := &privvalproto.PubKeyRequest{ChainId: ChainID} - resp, err := s.GetPubKey(context.Background(), req) + resp, err := s.GetPubKey(ctx, req) if tc.err { require.Error(t, err) } else { - pk, err := tc.pv.GetPubKey(context.Background()) + pk, err := tc.pv.GetPubKey(ctx) require.NoError(t, err) assert.Equal(t, resp.PubKey.GetEd25519(), 
pk.Bytes()) } @@ -105,16 +109,20 @@ func TestSignVote(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - s := tmgrpc.NewSignerServer(ChainID, tc.pv, log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + s := tmgrpc.NewSignerServer(ChainID, tc.pv, logger) req := &privvalproto.SignVoteRequest{ChainId: ChainID, Vote: tc.have.ToProto()} - resp, err := s.SignVote(context.Background(), req) + resp, err := s.SignVote(ctx, req) if tc.err { require.Error(t, err) } else { pbVote := tc.want.ToProto() - require.NoError(t, tc.pv.SignVote(context.Background(), ChainID, pbVote)) + require.NoError(t, tc.pv.SignVote(ctx, ChainID, pbVote)) assert.Equal(t, pbVote.Signature, resp.Vote.Signature) } @@ -172,15 +180,19 @@ func TestSignProposal(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - s := tmgrpc.NewSignerServer(ChainID, tc.pv, log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + s := tmgrpc.NewSignerServer(ChainID, tc.pv, logger) req := &privvalproto.SignProposalRequest{ChainId: ChainID, Proposal: tc.have.ToProto()} - resp, err := s.SignProposal(context.Background(), req) + resp, err := s.SignProposal(ctx, req) if tc.err { require.Error(t, err) } else { pbProposal := tc.want.ToProto() - require.NoError(t, tc.pv.SignProposal(context.Background(), ChainID, pbProposal)) + require.NoError(t, tc.pv.SignProposal(ctx, ChainID, pbProposal)) assert.Equal(t, pbProposal.Signature, resp.Proposal.Signature) } }) diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 413acca8eb..0361139dac 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "crypto/x509" - "io/ioutil" "os" "time" @@ -14,8 +13,9 @@ import ( "github.com/tendermint/tendermint/config" 
"github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" ) @@ -65,7 +65,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(ca) + bs, err := os.ReadFile(ca) if err != nil { log.Error("failed to read ca cert:", "error", err) os.Exit(1) @@ -88,6 +88,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { // DialRemoteSigner is a generalized function to dial the gRPC server. func DialRemoteSigner( + ctx context.Context, cfg *config.PrivValidatorConfig, chainID string, logger log.Logger, @@ -98,7 +99,7 @@ func DialRemoteSigner( transportSecurity = GenerateTLS(cfg.ClientCertificateFile(), cfg.ClientKeyFile(), cfg.RootCAFile(), logger) } else { - transportSecurity = grpc.WithInsecure() + transportSecurity = grpc.WithTransportCredentials(insecure.NewCredentials()) logger.Info("Using an insecure gRPC connection!") } @@ -110,7 +111,6 @@ func DialRemoteSigner( dialOptions = append(dialOptions, transportSecurity) - ctx := context.Background() _, address := tmnet.ProtocolAndAddress(cfg.ListenAddr) conn, err := grpc.DialContext(ctx, address, dialOptions...) 
if err != nil { diff --git a/privval/msgs_test.go b/privval/msgs_test.go index 7ac9f2c5d6..bbd3f6319f 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -35,6 +35,10 @@ func exampleVote() *types.Vote { }, ValidatorAddress: crypto.AddressHash([]byte("validator_address")), ValidatorIndex: 56789, + VoteExtension: types.VoteExtension{ + AppDataToSign: []byte("app_data_signed"), + AppDataSelfAuthenticating: []byte("app_data_self_authenticating"), + }, } } @@ -57,7 +61,6 @@ func exampleProposal() *types.Proposal { } } -// nolint:lll // ignore line length for tests func TestPrivvalVectors(t *testing.T) { pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey() ppk, err := encoding.PubKeyToProto(pk) @@ -84,8 +87,8 @@ func TestPrivvalVectors(t *testing.T) { {"pubKey request", &privproto.PubKeyRequest{}, "0a00"}, {"pubKey response", &privproto.PubKeyResponse{PubKey: ppk, Error: nil}, "12240a220a20556a436f1218d30942efe798420f51dc9b6a311b929c578257457d05c5fcf230"}, {"pubKey response with error", &privproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, - {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, "1a760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, - {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "22760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, + {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, 
"1aa8010aa501080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb034a2f0a0f6170705f646174615f7369676e6564121c6170705f646174615f73656c665f61757468656e7469636174696e67"}, + {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "22a8010aa501080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb034a2f0a0f6170705f646174615f7369676e6564121c6170705f646174615f73656c665f61757468656e7469636174696e67"}, {"Vote Response with error", &privproto.SignedVoteResponse{Vote: tmproto.Vote{}, Error: remoteError}, "22250a11220212002a0b088092b8c398feffffff0112100801120c697427732061206572726f72"}, {"Proposal Request", &privproto.SignProposalRequest{Proposal: proposalpb}, "2a700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, {"Proposal Response", &privproto.SignedProposalResponse{Proposal: *proposalpb, Error: nil}, "32700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go index ccd9834e44..6dacc9a288 100644 --- a/privval/retry_signer_client.go +++ b/privval/retry_signer_client.go @@ -34,15 +34,15 @@ func (sc *RetrySignerClient) IsConnected() bool { return sc.next.IsConnected() } -func (sc *RetrySignerClient) WaitForConnection(maxWait time.Duration) error { - return 
sc.next.WaitForConnection(maxWait) +func (sc *RetrySignerClient) WaitForConnection(ctx context.Context, maxWait time.Duration) error { + return sc.next.WaitForConnection(ctx, maxWait) } //-------------------------------------------------------- // Implement PrivValidator -func (sc *RetrySignerClient) Ping() error { - return sc.next.Ping() +func (sc *RetrySignerClient) Ping(ctx context.Context) error { + return sc.next.Ping(ctx) } func (sc *RetrySignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { diff --git a/privval/secret_connection.go b/privval/secret_connection.go index ffa5d36edf..1e179cf41f 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ -11,6 +11,7 @@ import ( "io" "math" "net" + "sync" "time" gogotypes "github.com/gogo/protobuf/types" @@ -24,9 +25,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/libs/async" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/async" tmprivval "github.com/tendermint/tendermint/proto/tendermint/privval" ) @@ -80,11 +80,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx tmsync.Mutex + recvMtx sync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx tmsync.Mutex + sendMtx sync.Mutex sendNonce *[aeadNonceSize]byte } @@ -99,7 +99,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* ) // Generate ephemeral keys for perfect forward secrecy. - locEphPub, locEphPriv := genEphKeys() + locEphPub, locEphPriv, err := genEphKeys() + if err != nil { + return nil, err + } // Write local ephemeral pubkey and receive one too. 
// NOTE: every 32-byte string is accepted as a Curve25519 public key (see @@ -132,7 +135,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* // Generate the secret used for receiving, sending, challenge via HKDF-SHA2 // on the transcript state (which itself also uses HKDF-SHA2 to derive a key // from the dhSecret). - recvSecret, sendSecret := deriveSecrets(dhSecret, locIsLeast) + recvSecret, sendSecret, err := deriveSecrets(dhSecret, locIsLeast) + if err != nil { + return nil, err + } const challengeSize = 32 var challenge [challengeSize]byte @@ -214,7 +220,10 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { // encrypt the frame sc.sendAead.Seal(sealedFrame[:0], sc.sendNonce[:], frame, nil) - incrNonce(sc.sendNonce) + if err := incrNonce(sc.sendNonce); err != nil { + return err + } + // end encryption _, err = sc.conn.Write(sealedFrame) @@ -258,7 +267,9 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { if err != nil { return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) } - incrNonce(sc.recvNonce) + if err = incrNonce(sc.recvNonce); err != nil { + return + } // end decryption // copy checkLength worth into data, @@ -288,14 +299,13 @@ func (sc *SecretConnection) SetWriteDeadline(t time.Time) error { return sc.conn.(net.Conn).SetWriteDeadline(t) } -func genEphKeys() (ephPub, ephPriv *[32]byte) { - var err error +func genEphKeys() (ephPub, ephPriv *[32]byte, err error) { // TODO: Probably not a problem but ask Tony: different from the rust implementation (uses x25519-dalek), // we do not "clamp" the private key scalar: // see: https://github.com/dalek-cryptography/x25519-dalek/blob/34676d336049df2bba763cc076a75e47ae1f170f/src/x25519.rs#L56-L74 ephPub, ephPriv, err = box.GenerateKey(crand.Reader) if err != nil { - panic("Could not generate ephemeral key-pair") + return } return } @@ -339,14 +349,14 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub 
*[32]byt func deriveSecrets( dhSecret *[32]byte, locIsLeast bool, -) (recvSecret, sendSecret *[aeadKeySize]byte) { +) (recvSecret, sendSecret *[aeadKeySize]byte, err error) { hash := sha256.New hkdf := hkdf.New(hash, dhSecret[:], nil, secretConnKeyAndChallengeGen) // get enough data for 2 aead keys, and a 32 byte challenge res := new([2*aeadKeySize + 32]byte) - _, err := io.ReadFull(hkdf, res[:]) + _, err = io.ReadFull(hkdf, res[:]) if err != nil { - panic(err) + return nil, nil, err } recvSecret = new([aeadKeySize]byte) @@ -454,13 +464,14 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four // bytes. We only increment a 64 bit unsigned int in the remaining 8 bytes // (little-endian in nonce[4:]). -func incrNonce(nonce *[aeadNonceSize]byte) { +func incrNonce(nonce *[aeadNonceSize]byte) error { counter := binary.LittleEndian.Uint64(nonce[4:]) if counter == math.MaxUint64 { // Terminates the session and makes sure the nonce would not re-used. // See https://github.com/tendermint/tendermint/issues/3531 - panic("can't increase nonce without overflow") + return errors.New("can't increase nonce without overflow") } counter++ binary.LittleEndian.PutUint64(nonce[4:], counter) + return nil } diff --git a/privval/signer_client.go b/privval/signer_client.go index 5e5b32a928..3247a74b7d 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -15,6 +16,7 @@ import ( // SignerClient implements PrivValidator. 
// Handles remote validator connections that provide signing services type SignerClient struct { + logger log.Logger endpoint *SignerListenerEndpoint chainID string } @@ -23,19 +25,28 @@ var _ types.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) -func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { +func NewSignerClient(ctx context.Context, endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { if !endpoint.IsRunning() { - if err := endpoint.Start(); err != nil { + if err := endpoint.Start(ctx); err != nil { return nil, fmt.Errorf("failed to start listener endpoint: %w", err) } } - return &SignerClient{endpoint: endpoint, chainID: chainID}, nil + return &SignerClient{ + logger: endpoint.logger, + endpoint: endpoint, + chainID: chainID, + }, nil } // Close closes the underlying connection func (sc *SignerClient) Close() error { - return sc.endpoint.Close() + err := sc.endpoint.Stop() + cerr := sc.endpoint.Close() + if err != nil { + return err + } + return cerr } // IsConnected indicates with the signer is connected to a remote signing service @@ -44,18 +55,18 @@ func (sc *SignerClient) IsConnected() bool { } // WaitForConnection waits maxWait for a connection or returns a timeout error -func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error { - return sc.endpoint.WaitForConnection(maxWait) +func (sc *SignerClient) WaitForConnection(ctx context.Context, maxWait time.Duration) error { + return sc.endpoint.WaitForConnection(ctx, maxWait) } //-------------------------------------------------------- // Implement PrivValidator // Ping sends a ping request to the remote signer -func (sc *SignerClient) Ping() error { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) +func (sc *SignerClient) Ping(ctx context.Context) error { + response, err := 
sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.PingRequest{})) if err != nil { - sc.endpoint.Logger.Error("SignerClient::Ping", "err", err) + sc.logger.Error("SignerClient::Ping", "err", err) return nil } @@ -70,7 +81,7 @@ func (sc *SignerClient) Ping() error { // GetPubKey retrieves a public key from a remote signer // returns an error if client is not able to provide the key func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PubKeyRequest{ChainId: sc.chainID})) + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.PubKeyRequest{ChainId: sc.chainID})) if err != nil { return nil, fmt.Errorf("send: %w", err) } @@ -93,7 +104,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) { // SignVote requests a remote signer to sign a vote func (sc *SignerClient) SignVote(ctx context.Context, chainID string, vote *tmproto.Vote) error { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.SignVoteRequest{Vote: vote, ChainId: chainID})) + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.SignVoteRequest{Vote: vote, ChainId: chainID})) if err != nil { return err } @@ -113,7 +124,7 @@ func (sc *SignerClient) SignVote(ctx context.Context, chainID string, vote *tmpr // SignProposal requests a remote signer to sign a proposal func (sc *SignerClient) SignProposal(ctx context.Context, chainID string, proposal *tmproto.Proposal) error { - response, err := sc.endpoint.SendRequest(mustWrapMsg( + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg( &privvalproto.SignProposalRequest{Proposal: proposal, ChainId: chainID}, )) if err != nil { diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 9aa49e7098..d8bb258284 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -6,11 +6,13 @@ import ( "testing" "time" + "github.com/fortytw2/leaktest" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -23,370 +25,377 @@ type signerTestCase struct { mockPV types.PrivValidator signerClient *SignerClient signerServer *SignerServer + name string + closer context.CancelFunc } -func getSignerTestCases(t *testing.T) []signerTestCase { +func getSignerTestCases(ctx context.Context, t *testing.T, logger log.Logger) []signerTestCase { + t.Helper() + testCases := make([]signerTestCase, 0) // Get test cases for each possible dialer (DialTCP / DialUnix / etc) - for _, dtc := range getDialerTestCases(t) { + for idx, dtc := range getDialerTestCases(t) { chainID := tmrand.Str(12) mockPV := types.NewMockPV() + cctx, ccancel := context.WithCancel(ctx) // get a pair of signer listener, signer dialer endpoints - sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) - sc, err := NewSignerClient(sl, chainID) + sl, sd := getMockEndpoints(cctx, t, logger, dtc.addr, dtc.dialer) + sc, err := NewSignerClient(cctx, sl, chainID) require.NoError(t, err) ss := NewSignerServer(sd, chainID, mockPV) - err = ss.Start() - require.NoError(t, err) + require.NoError(t, ss.Start(cctx)) - tc := signerTestCase{ + testCases = append(testCases, signerTestCase{ + name: fmt.Sprintf("Case%d%T_%s", idx, dtc.dialer, chainID), + closer: ccancel, chainID: chainID, mockPV: mockPV, signerClient: sc, signerServer: ss, - } - - testCases = append(testCases, tc) + }) + t.Cleanup(ss.Wait) + t.Cleanup(sc.endpoint.Wait) } return testCases } func TestSignerClose(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - err := tc.signerClient.Close() - assert.NoError(t, err) + t.Cleanup(leaktest.Check(t)) - err = 
tc.signerServer.Stop() - assert.NoError(t, err) + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(bctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer leaktest.Check(t) + defer func() { + tc.closer() + tc.signerClient.endpoint.Wait() + tc.signerServer.Wait() + }() + + assert.NoError(t, tc.signerClient.Close()) + assert.NoError(t, tc.signerServer.Stop()) + }) } } func TestSignerPing(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() - err := tc.signerClient.Ping() + for _, tc := range getSignerTestCases(ctx, t, logger) { + err := tc.signerClient.Ping(ctx) assert.NoError(t, err) } } func TestSignerGetPubKey(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + t.Cleanup(leaktest.Check(t)) - pubKey, err := tc.signerClient.GetPubKey(context.Background()) - require.NoError(t, err) - expectedPubKey, err := tc.mockPV.GetPubKey(context.Background()) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - assert.Equal(t, expectedPubKey, pubKey) + logger := log.NewNopLogger() - pubKey, err = tc.signerClient.GetPubKey(context.Background()) - require.NoError(t, err) - expectedpk, err := tc.mockPV.GetPubKey(context.Background()) - require.NoError(t, err) - expectedAddr := expectedpk.Address() + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, 
func(t *testing.T) { + defer tc.closer() - assert.Equal(t, expectedAddr, pubKey.Address()) + pubKey, err := tc.signerClient.GetPubKey(ctx) + require.NoError(t, err) + expectedPubKey, err := tc.mockPV.GetPubKey(ctx) + require.NoError(t, err) + + assert.Equal(t, expectedPubKey, pubKey) + + pubKey, err = tc.signerClient.GetPubKey(ctx) + require.NoError(t, err) + expectedpk, err := tc.mockPV.GetPubKey(ctx) + require.NoError(t, err) + expectedAddr := expectedpk.Address() + + assert.Equal(t, expectedAddr, pubKey.Address()) + }) } } func TestSignerProposal(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - have := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } - want := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + have := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + want := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, 
Total: 2}}, + Timestamp: ts, } - }) - require.NoError(t, tc.mockPV.SignProposal(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignProposal(context.Background(), tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignProposal(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignProposal(ctx, tc.chainID, have.ToProto())) + + assert.Equal(t, want.Signature, have.Signature) + }) - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVote(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := 
&types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) - assert.Equal(t, want.Signature, have.Signature) + assert.Equal(t, want.Signature, have.Signature) + }) } } func TestSignerVoteResetDeadline(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + 
Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - time.Sleep(testTimeoutReadWrite2o3) + time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - assert.Equal(t, want.Signature, have.Signature) + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + assert.Equal(t, want.Signature, have.Signature) - // TODO(jleni): Clarify what is actually being tested + // TODO(jleni): Clarify what is actually being tested - // This would exceed the deadline if it was not extended by the previous message - time.Sleep(testTimeoutReadWrite2o3) + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) - assert.Equal(t, want.Signature, have.Signature) + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) + assert.Equal(t, want.Signature, have.Signature) + }) } } func TestSignerVoteKeepAlive(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 
2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, } - }) - // Check that even if the client does not request a - // signature for a long time. The service is still available + // Check that even if the client does not request a + // signature for a long time. 
The service is still available - // in this particular case, we use the dialer logger to ensure that - // test messages are properly interleaved in the test logs - tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------") - time.Sleep(testTimeoutReadWrite * 3) - tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") + // in this particular case, we use the dialer logger to ensure that + // test messages are properly interleaved in the test logs + time.Sleep(testTimeoutReadWrite * 3) - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, want.ToProto())) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, have.ToProto())) + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, have.ToProto())) - assert.Equal(t, want.Signature, have.Signature) + assert.Equal(t, want.Signature, have.Signature) + }) } } func TestSignerSignProposalErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - // Replace service with a mock that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + // Replace service with a mock that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + proposal := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: 
types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + Signature: []byte("signature"), } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - proposal := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - Signature: []byte("signature"), - } - - err := tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - err = tc.mockPV.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Error(t, err) - - err = tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Error(t, err) + err := tc.signerClient.SignProposal(ctx, tc.chainID, proposal.ToProto()) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) + + err = tc.mockPV.SignProposal(ctx, tc.chainID, proposal.ToProto()) + require.Error(t, err) + + err = tc.signerClient.SignProposal(ctx, tc.chainID, proposal.ToProto()) + require.Error(t, err) + }) } } func TestSignerSignVoteErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - valAddr := tmrand.Bytes(crypto.AddressSize) - vote := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - ValidatorAddress: valAddr, - ValidatorIndex: 1, - Signature: []byte("signature"), - } - - // Replace signer service privval with one that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() - - tc := tc - t.Cleanup(func() 
{ - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + vote := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + Signature: []byte("signature"), } - }) - err := tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) + // Replace signer service privval with one that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + err := tc.signerClient.SignVote(ctx, tc.chainID, vote.ToProto()) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) - err = tc.mockPV.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Error(t, err) + err = tc.mockPV.SignVote(ctx, tc.chainID, vote.ToProto()) + require.Error(t, err) - err = tc.signerClient.SignVote(context.Background(), tc.chainID, vote.ToProto()) - require.Error(t, err) + err = tc.signerClient.SignVote(ctx, tc.chainID, vote.ToProto()) + require.Error(t, err) + }) } } @@ -413,28 +422,27 @@ func brokenHandler(ctx context.Context, privVal types.PrivValidator, request pri } func TestSignerUnexpectedResponse(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc.signerServer.privVal = types.NewMockPV() - tc.mockPV = 
types.NewMockPV() + t.Cleanup(leaktest.Check(t)) - tc.signerServer.SetRequestHandler(brokenHandler) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} + tc.signerServer.privVal = types.NewMockPV() + tc.mockPV = types.NewMockPV() - e := tc.signerClient.SignVote(context.Background(), tc.chainID, want.ToProto()) - assert.EqualError(t, e, "empty response") + tc.signerServer.SetRequestHandler(brokenHandler) + + ts := time.Now() + want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} + + e := tc.signerClient.SignVote(ctx, tc.chainID, want.ToProto()) + assert.EqualError(t, e, "empty response") + }) } } diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go index 93d26b0439..b291a7ef5e 100644 --- a/privval/signer_dialer_endpoint.go +++ b/privval/signer_dialer_endpoint.go @@ -1,6 +1,7 @@ package privval import ( + "context" "time" "github.com/tendermint/tendermint/libs/log" @@ -58,6 +59,7 @@ func NewSignerDialerEndpoint( retryWait: defaultRetryWaitMilliseconds * time.Millisecond, maxConnRetries: defaultMaxDialRetries, } + sd.signerEndpoint.logger = logger sd.BaseService = *service.NewBaseService(logger, "SignerDialerEndpoint", sd) sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second @@ -69,28 +71,42 @@ func NewSignerDialerEndpoint( return sd } -func (sd *SignerDialerEndpoint) ensureConnection() error { +func (sd *SignerDialerEndpoint) OnStart(context.Context) error { return nil } +func (sd *SignerDialerEndpoint) OnStop() {} + +func (sd *SignerDialerEndpoint) 
ensureConnection(ctx context.Context) error { if sd.IsConnected() { return nil } + timer := time.NewTimer(0) + defer timer.Stop() retries := 0 for retries < sd.maxConnRetries { + if err := ctx.Err(); err != nil { + return err + } conn, err := sd.dialer() if err != nil { retries++ - sd.Logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err) + sd.logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err) + // Wait between retries - time.Sleep(sd.retryWait) + timer.Reset(sd.retryWait) + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + } } else { sd.SetConnection(conn) - sd.Logger.Debug("SignerDialer: Connection Ready") + sd.logger.Debug("SignerDialer: Connection Ready") return nil } } - sd.Logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries) + sd.logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries) return ErrNoConnection } diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index 0d46ca6925..8810bdf85b 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -1,12 +1,14 @@ package privval import ( + "context" "fmt" "net" + "sync" "time" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" ) @@ -17,8 +19,9 @@ const ( type signerEndpoint struct { service.BaseService + logger log.Logger - connMtx tmsync.Mutex + connMtx sync.Mutex conn net.Conn timeoutReadWrite time.Duration @@ -52,11 +55,13 @@ func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net. 
} // TryGetConnection retrieves a connection if it is already available -func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, maxWait time.Duration) error { +func (se *signerEndpoint) WaitConnection(ctx context.Context, connectionAvailableCh chan net.Conn, maxWait time.Duration) error { se.connMtx.Lock() defer se.connMtx.Unlock() select { + case <-ctx.Done(): + return ctx.Err() case se.conn = <-connectionAvailableCh: case <-time.After(maxWait): return ErrConnectionTimeout @@ -104,7 +109,7 @@ func (se *signerEndpoint) ReadMessage() (msg privvalproto.Message, err error) { err = fmt.Errorf("empty error: %w", ErrReadTimeout) } - se.Logger.Debug("Dropping [read]", "obj", se) + se.logger.Debug("Dropping [read]", "obj", se) se.dropConnection() } @@ -149,7 +154,7 @@ func (se *signerEndpoint) isConnected() bool { func (se *signerEndpoint) dropConnection() { if se.conn != nil { if err := se.conn.Close(); err != nil { - se.Logger.Error("signerEndpoint::dropConnection", "err", err) + se.logger.Error("signerEndpoint::dropConnection", "err", err) } se.conn = nil } diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 292e7a4762..12c9159735 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -1,11 +1,12 @@ package privval import ( + "context" "fmt" "net" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -38,7 +39,7 @@ type SignerListenerEndpoint struct { pingTimer *time.Ticker pingInterval time.Duration - instanceMtx tmsync.Mutex // Ensures instance public methods access, i.e. SendRequest + instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest } // NewSignerListenerEndpoint returns an instance of SignerListenerEndpoint. 
@@ -52,6 +53,7 @@ func NewSignerListenerEndpoint( timeoutAccept: defaultTimeoutAcceptSeconds * time.Second, } + sl.signerEndpoint.logger = logger sl.BaseService = *service.NewBaseService(logger, "SignerListenerEndpoint", sl) sl.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second @@ -63,7 +65,7 @@ func NewSignerListenerEndpoint( } // OnStart implements service.Service. -func (sl *SignerListenerEndpoint) OnStart() error { +func (sl *SignerListenerEndpoint) OnStart(ctx context.Context) error { sl.connectRequestCh = make(chan struct{}) sl.connectionAvailableCh = make(chan net.Conn) @@ -71,8 +73,8 @@ func (sl *SignerListenerEndpoint) OnStart() error { sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond sl.pingTimer = time.NewTicker(sl.pingInterval) - go sl.serviceLoop() - go sl.pingLoop() + go sl.serviceLoop(ctx) + go sl.pingLoop(ctx) sl.connectRequestCh <- struct{}{} @@ -88,7 +90,7 @@ func (sl *SignerListenerEndpoint) OnStop() { // Stop listening if sl.listener != nil { if err := sl.listener.Close(); err != nil { - sl.Logger.Error("Closing Listener", "err", err) + sl.logger.Error("Closing Listener", "err", err) sl.listener = nil } } @@ -97,18 +99,18 @@ func (sl *SignerListenerEndpoint) OnStop() { } // WaitForConnection waits maxWait for a connection or returns a timeout error -func (sl *SignerListenerEndpoint) WaitForConnection(maxWait time.Duration) error { +func (sl *SignerListenerEndpoint) WaitForConnection(ctx context.Context, maxWait time.Duration) error { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() - return sl.ensureConnection(maxWait) + return sl.ensureConnection(ctx, maxWait) } // SendRequest ensures there is a connection, sends a request and waits for a response -func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*privvalproto.Message, error) { +func (sl *SignerListenerEndpoint) SendRequest(ctx context.Context, request privvalproto.Message) 
(*privvalproto.Message, error) { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() - err := sl.ensureConnection(sl.timeoutAccept) + err := sl.ensureConnection(ctx, sl.timeoutAccept) if err != nil { return nil, err } @@ -129,7 +131,7 @@ func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*pr return &res, nil } -func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error { +func (sl *SignerListenerEndpoint) ensureConnection(ctx context.Context, maxWait time.Duration) error { if sl.IsConnected() { return nil } @@ -140,9 +142,9 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error } // block until connected or timeout - sl.Logger.Info("SignerListener: Blocking for connection") + sl.logger.Info("SignerListener: Blocking for connection") sl.triggerConnect() - return sl.WaitConnection(sl.connectionAvailableCh, maxWait) + return sl.WaitConnection(ctx, sl.connectionAvailableCh, maxWait) } func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { @@ -151,7 +153,7 @@ func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { } // wait for a new conn - sl.Logger.Info("SignerListener: Listening for new connection") + sl.logger.Info("SignerListener: Listening for new connection") conn, err := sl.listener.Accept() if err != nil { return nil, err @@ -172,19 +174,19 @@ func (sl *SignerListenerEndpoint) triggerReconnect() { sl.triggerConnect() } -func (sl *SignerListenerEndpoint) serviceLoop() { +func (sl *SignerListenerEndpoint) serviceLoop(ctx context.Context) { for { select { case <-sl.connectRequestCh: { conn, err := sl.acceptNewConnection() if err == nil { - sl.Logger.Info("SignerListener: Connected") + sl.logger.Info("SignerListener: Connected") // We have a good connection, wait for someone that needs one otherwise cancellation select { case sl.connectionAvailableCh <- conn: - case <-sl.Quit(): + case <-ctx.Done(): return } } @@ -194,24 +196,24 @@ func (sl 
*SignerListenerEndpoint) serviceLoop() { default: } } - case <-sl.Quit(): + case <-ctx.Done(): return } } } -func (sl *SignerListenerEndpoint) pingLoop() { +func (sl *SignerListenerEndpoint) pingLoop(ctx context.Context) { for { select { case <-sl.pingTimer.C: { - _, err := sl.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) + _, err := sl.SendRequest(ctx, mustWrapMsg(&privvalproto.PingRequest{})) if err != nil { - sl.Logger.Error("SignerListener: Ping timeout") + sl.logger.Error("SignerListener: Ping timeout") sl.triggerReconnect() } } - case <-sl.Quit(): + case <-ctx.Done(): return } } diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index cbd45e6cee..4c9c31c42c 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -1,10 +1,12 @@ package privval import ( + "context" "net" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -38,6 +40,13 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { retries = 10 ) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + ln, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -60,8 +69,7 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { } }(ln, attemptCh) - dialerEndpoint := NewSignerDialerEndpoint( - log.TestingLogger(), + dialerEndpoint := NewSignerDialerEndpoint(logger, DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()), ) SignerDialerEndpointTimeoutReadWrite(time.Millisecond)(dialerEndpoint) @@ -71,13 +79,9 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { mockPV := types.NewMockPV() signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - err = signerServer.Start() + err = signerServer.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := signerServer.Stop(); err != nil { - t.Error(err) - } - 
}) + t.Cleanup(signerServer.Wait) select { case attempts := <-attemptCh: @@ -88,15 +92,22 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { } func TestRetryConnToRemoteSigner(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + for _, tc := range getDialerTestCases(t) { var ( - logger = log.TestingLogger() chainID = tmrand.Str(12) mockPV = types.NewMockPV() endpointIsOpenCh = make(chan struct{}) thisConnTimeout = testTimeoutReadWrite - listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout) + listenerEndpoint = newSignerListenerEndpoint(t, logger, tc.addr, thisConnTimeout) ) + t.Cleanup(listenerEndpoint.Wait) dialerEndpoint := NewSignerDialerEndpoint( logger, @@ -107,15 +118,12 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) - t.Cleanup(func() { - if err := listenerEndpoint.Stop(); err != nil { - t.Error(err) - } - }) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, signerServer.Start()) + require.NoError(t, signerServer.Start(ctx)) assert.True(t, signerServer.IsRunning()) + t.Cleanup(signerServer.Wait) + <-endpointIsOpenCh if err := signerServer.Stop(); err != nil { t.Error(err) @@ -128,13 +136,10 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) // let some pings pass - require.NoError(t, signerServer2.Start()) + require.NoError(t, signerServer2.Start(ctx)) assert.True(t, signerServer2.IsRunning()) - t.Cleanup(func() { - if err := signerServer2.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(signerServer2.Wait) + t.Cleanup(func() { _ = signerServer2.Stop() }) // give the client some time to re-establish the conn to the remote signer // should see sth like this in the logs: @@ 
-145,14 +150,11 @@ func TestRetryConnToRemoteSigner(t *testing.T) { } } -func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { +func newSignerListenerEndpoint(t *testing.T, logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { proto, address := tmnet.ProtocolAndAddress(addr) ln, err := net.Listen(proto, address) - logger.Info("SignerListener: Listening", "proto", proto, "address", address) - if err != nil { - panic(err) - } + require.NoError(t, err) var listener net.Listener @@ -175,22 +177,30 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite ) } -func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { +func startListenerEndpointAsync( + ctx context.Context, + t *testing.T, + sle *SignerListenerEndpoint, + endpointIsOpenCh chan struct{}, +) { + t.Helper() + go func(sle *SignerListenerEndpoint) { - require.NoError(t, sle.Start()) + require.NoError(t, sle.Start(ctx)) assert.True(t, sle.IsRunning()) close(endpointIsOpenCh) }(sle) } func getMockEndpoints( + ctx context.Context, t *testing.T, + logger log.Logger, addr string, socketDialer SocketDialer, ) (*SignerListenerEndpoint, *SignerDialerEndpoint) { var ( - logger = log.TestingLogger() endpointIsOpenCh = make(chan struct{}) dialerEndpoint = NewSignerDialerEndpoint( @@ -198,15 +208,15 @@ func getMockEndpoints( socketDialer, ) - listenerEndpoint = newSignerListenerEndpoint(logger, addr, testTimeoutReadWrite) + listenerEndpoint = newSignerListenerEndpoint(t, logger, addr, testTimeoutReadWrite) ) SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) SignerDialerEndpointConnRetries(1e6)(dialerEndpoint) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, dialerEndpoint.Start()) + require.NoError(t, 
dialerEndpoint.Start(ctx)) assert.True(t, dialerEndpoint.IsRunning()) <-endpointIsOpenCh diff --git a/privval/signer_server.go b/privval/signer_server.go index 24bf67cc5f..4945b81506 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -3,8 +3,8 @@ package privval import ( "context" "io" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -24,7 +24,7 @@ type SignerServer struct { chainID string privVal types.PrivValidator - handlerMtx tmsync.Mutex + handlerMtx sync.Mutex validationRequestHandler ValidationRequestHandlerFunc } @@ -36,20 +36,20 @@ func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal typ validationRequestHandler: DefaultValidationRequestHandler, } - ss.BaseService = *service.NewBaseService(endpoint.Logger, "SignerServer", ss) + ss.BaseService = *service.NewBaseService(endpoint.logger, "SignerServer", ss) return ss } // OnStart implements service.Service. -func (ss *SignerServer) OnStart() error { - go ss.serviceLoop() +func (ss *SignerServer) OnStart(ctx context.Context) error { + go ss.serviceLoop(ctx) return nil } // OnStop implements service.Service. func (ss *SignerServer) OnStop() { - ss.endpoint.Logger.Debug("SignerServer: OnStop calling Close") + ss.endpoint.logger.Debug("SignerServer: OnStop calling Close") _ = ss.endpoint.Close() } @@ -60,7 +60,7 @@ func (ss *SignerServer) SetRequestHandler(validationRequestHandler ValidationReq ss.validationRequestHandler = validationRequestHandler } -func (ss *SignerServer) servicePendingRequest() { +func (ss *SignerServer) servicePendingRequest(ctx context.Context) { if !ss.IsRunning() { return // Ignore error from closing. 
} @@ -68,7 +68,7 @@ func (ss *SignerServer) servicePendingRequest() { req, err := ss.endpoint.ReadMessage() if err != nil { if err != io.EOF { - ss.Logger.Error("SignerServer: HandleMessage", "err", err) + ss.endpoint.logger.Error("SignerServer: HandleMessage", "err", err) } return } @@ -78,31 +78,29 @@ func (ss *SignerServer) servicePendingRequest() { // limit the scope of the lock ss.handlerMtx.Lock() defer ss.handlerMtx.Unlock() - res, err = ss.validationRequestHandler(context.TODO(), ss.privVal, req, ss.chainID) // todo + res, err = ss.validationRequestHandler(ctx, ss.privVal, req, ss.chainID) // todo if err != nil { // only log the error; we'll reply with an error in res - ss.Logger.Error("SignerServer: handleMessage", "err", err) + ss.endpoint.logger.Error("SignerServer: handleMessage", "err", err) } } err = ss.endpoint.WriteMessage(res) if err != nil { - ss.Logger.Error("SignerServer: writeMessage", "err", err) + ss.endpoint.logger.Error("SignerServer: writeMessage", "err", err) } } -func (ss *SignerServer) serviceLoop() { +func (ss *SignerServer) serviceLoop(ctx context.Context) { for { select { + case <-ctx.Done(): + return default: - err := ss.endpoint.ensureConnection() - if err != nil { + if err := ss.endpoint.ensureConnection(ctx); err != nil { return } - ss.servicePendingRequest() - - case <-ss.Quit(): - return + ss.servicePendingRequest(ctx) } } } diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index 32c07c5911..1ff738cbbd 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -9,10 +9,20 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + tmnet "github.com/tendermint/tendermint/libs/net" ) +// getFreeLocalhostAddrPort returns a free localhost:port address +func getFreeLocalhostAddrPort(t *testing.T) string { + t.Helper() + port, err := tmnet.GetFreePort() + require.NoError(t, err) + + return fmt.Sprintf("127.0.0.1:%d", port) +} + func 
getDialerTestCases(t *testing.T) []dialerTestCase { - tcpAddr := GetFreeLocalhostAddrPort() + tcpAddr := getFreeLocalhostAddrPort(t) unixFilePath, err := testUnixAddr() require.NoError(t, err) unixAddr := fmt.Sprintf("unix://%s", unixFilePath) @@ -31,7 +41,7 @@ func getDialerTestCases(t *testing.T) []dialerTestCase { func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) { // Generate a networking timeout - tcpAddr := GetFreeLocalhostAddrPort() + tcpAddr := getFreeLocalhostAddrPort(t) dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) @@ -39,7 +49,7 @@ func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) { } func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) { - tcpAddr := GetFreeLocalhostAddrPort() + tcpAddr := getFreeLocalhostAddrPort(t) dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 5e95ec10ce..c411332b25 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -1,12 +1,12 @@ package privval import ( - "io/ioutil" "net" "os" "testing" "time" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" ) @@ -29,7 +29,7 @@ type listenerTestCase struct { // testUnixAddr will attempt to obtain a platform-independent temporary file // name for a Unix socket func testUnixAddr() (string, error) { - f, err := ioutil.TempFile("", "tendermint-privval-test-*") + f, err := os.CreateTemp("", "tendermint-privval-test-*") if err != nil { return "", err } @@ -108,9 +108,7 @@ func TestListenerTimeoutReadWrite(t *testing.T) { for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) { go func(dialer SocketDialer) { _, err := dialer() - if err != nil { - panic(err) - } + require.NoError(t, err) }(tc.dialer) c, err := tc.listener.Accept() diff --git a/privval/utils.go b/privval/utils.go 
index 0b8cced342..1d6681b452 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -51,12 +51,3 @@ func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEnd return pve, nil } - -// GetFreeLocalhostAddrPort returns a free localhost:port address -func GetFreeLocalhostAddrPort() string { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return fmt.Sprintf("127.0.0.1:%d", port) -} diff --git a/proto/tendermint/blocksync/message_test.go b/proto/tendermint/blocksync/message_test.go index a8d9679fe9..a58a23c642 100644 --- a/proto/tendermint/blocksync/message_test.go +++ b/proto/tendermint/blocksync/message_test.go @@ -84,7 +84,6 @@ func TestStatusResponse_Validate(t *testing.T) { } } -// nolint:lll func TestBlockchainMessageVectors(t *testing.T) { block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil, nil, nil) block.Version.Block = 11 // overwrite updated protocol version diff --git a/proto/tendermint/crypto/crypto.go b/proto/tendermint/crypto/crypto.go new file mode 100644 index 0000000000..66b34cc2bc --- /dev/null +++ b/proto/tendermint/crypto/crypto.go @@ -0,0 +1,7 @@ +package crypto + +// These functions export type tags for use with internal/jsontypes. 
+ +func (*PublicKey) TypeTag() string { return "tendermint.crypto.PublicKey" } +func (*PublicKey_Ed25519) TypeTag() string { return "tendermint.crypto.PublicKey_Ed25519" } +func (*PublicKey_Secp256K1) TypeTag() string { return "tendermint.crypto.PublicKey_Secp256K1" } diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go index 38c8239dde..61036142fb 100644 --- a/proto/tendermint/p2p/pex.go +++ b/proto/tendermint/p2p/pex.go @@ -13,10 +13,6 @@ func (m *PexMessage) Wrap(pb proto.Message) error { m.Sum = &PexMessage_PexRequest{PexRequest: msg} case *PexResponse: m.Sum = &PexMessage_PexResponse{PexResponse: msg} - case *PexRequestV2: - m.Sum = &PexMessage_PexRequestV2{PexRequestV2: msg} - case *PexResponseV2: - m.Sum = &PexMessage_PexResponseV2{PexResponseV2: msg} default: return fmt.Errorf("unknown pex message: %T", msg) } @@ -31,10 +27,6 @@ func (m *PexMessage) Unwrap() (proto.Message, error) { return msg.PexRequest, nil case *PexMessage_PexResponse: return msg.PexResponse, nil - case *PexMessage_PexRequestV2: - return msg.PexRequestV2, nil - case *PexMessage_PexResponseV2: - return msg.PexResponseV2, nil default: return nil, fmt.Errorf("unknown pex message: %T", msg) } diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 63882c3643..15ccce15e5 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -24,9 +24,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PexAddress struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (m *PexAddress) Reset() { *m = PexAddress{} } @@ -62,27 +60,13 @@ func (m *PexAddress) XXX_DiscardUnknown() { var xxx_messageInfo_PexAddress 
proto.InternalMessageInfo -func (m *PexAddress) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *PexAddress) GetIP() string { +func (m *PexAddress) GetURL() string { if m != nil { - return m.IP + return m.URL } return "" } -func (m *PexAddress) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - type PexRequest struct { } @@ -163,136 +147,10 @@ func (m *PexResponse) GetAddresses() []PexAddress { return nil } -type PexAddressV2 struct { - URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` -} - -func (m *PexAddressV2) Reset() { *m = PexAddressV2{} } -func (m *PexAddressV2) String() string { return proto.CompactTextString(m) } -func (*PexAddressV2) ProtoMessage() {} -func (*PexAddressV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{3} -} -func (m *PexAddressV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexAddressV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexAddressV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexAddressV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexAddressV2.Merge(m, src) -} -func (m *PexAddressV2) XXX_Size() int { - return m.Size() -} -func (m *PexAddressV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexAddressV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexAddressV2 proto.InternalMessageInfo - -func (m *PexAddressV2) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -type PexRequestV2 struct { -} - -func (m *PexRequestV2) Reset() { *m = PexRequestV2{} } -func (m *PexRequestV2) String() string { return proto.CompactTextString(m) } -func (*PexRequestV2) ProtoMessage() {} -func (*PexRequestV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{4} -} -func (m *PexRequestV2) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexRequestV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexRequestV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexRequestV2.Merge(m, src) -} -func (m *PexRequestV2) XXX_Size() int { - return m.Size() -} -func (m *PexRequestV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexRequestV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexRequestV2 proto.InternalMessageInfo - -type PexResponseV2 struct { - Addresses []PexAddressV2 `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"` -} - -func (m *PexResponseV2) Reset() { *m = PexResponseV2{} } -func (m *PexResponseV2) String() string { return proto.CompactTextString(m) } -func (*PexResponseV2) ProtoMessage() {} -func (*PexResponseV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{5} -} -func (m *PexResponseV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexResponseV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexResponseV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexResponseV2.Merge(m, src) -} -func (m *PexResponseV2) XXX_Size() int { - return m.Size() -} -func (m *PexResponseV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexResponseV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexResponseV2 proto.InternalMessageInfo - -func (m *PexResponseV2) GetAddresses() []PexAddressV2 { - if m != nil { - return m.Addresses - } - return nil -} - type PexMessage struct { // Types that are valid to be 
assigned to Sum: // *PexMessage_PexRequest // *PexMessage_PexResponse - // *PexMessage_PexRequestV2 - // *PexMessage_PexResponseV2 Sum isPexMessage_Sum `protobuf_oneof:"sum"` } @@ -300,7 +158,7 @@ func (m *PexMessage) Reset() { *m = PexMessage{} } func (m *PexMessage) String() string { return proto.CompactTextString(m) } func (*PexMessage) ProtoMessage() {} func (*PexMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{6} + return fileDescriptor_81c2f011fd13be57, []int{3} } func (m *PexMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,22 +194,14 @@ type isPexMessage_Sum interface { } type PexMessage_PexRequest struct { - PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` + PexRequest *PexRequest `protobuf:"bytes,3,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` } type PexMessage_PexResponse struct { - PexResponse *PexResponse `protobuf:"bytes,2,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` -} -type PexMessage_PexRequestV2 struct { - PexRequestV2 *PexRequestV2 `protobuf:"bytes,3,opt,name=pex_request_v2,json=pexRequestV2,proto3,oneof" json:"pex_request_v2,omitempty"` -} -type PexMessage_PexResponseV2 struct { - PexResponseV2 *PexResponseV2 `protobuf:"bytes,4,opt,name=pex_response_v2,json=pexResponseV2,proto3,oneof" json:"pex_response_v2,omitempty"` + PexResponse *PexResponse `protobuf:"bytes,4,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` } -func (*PexMessage_PexRequest) isPexMessage_Sum() {} -func (*PexMessage_PexResponse) isPexMessage_Sum() {} -func (*PexMessage_PexRequestV2) isPexMessage_Sum() {} -func (*PexMessage_PexResponseV2) isPexMessage_Sum() {} +func (*PexMessage_PexRequest) isPexMessage_Sum() {} +func (*PexMessage_PexResponse) isPexMessage_Sum() {} func (m *PexMessage) GetSum() isPexMessage_Sum { if m != nil { @@ -374,27 
+224,11 @@ func (m *PexMessage) GetPexResponse() *PexResponse { return nil } -func (m *PexMessage) GetPexRequestV2() *PexRequestV2 { - if x, ok := m.GetSum().(*PexMessage_PexRequestV2); ok { - return x.PexRequestV2 - } - return nil -} - -func (m *PexMessage) GetPexResponseV2() *PexResponseV2 { - if x, ok := m.GetSum().(*PexMessage_PexResponseV2); ok { - return x.PexResponseV2 - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. func (*PexMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PexMessage_PexRequest)(nil), (*PexMessage_PexResponse)(nil), - (*PexMessage_PexRequestV2)(nil), - (*PexMessage_PexResponseV2)(nil), } } @@ -402,42 +236,33 @@ func init() { proto.RegisterType((*PexAddress)(nil), "tendermint.p2p.PexAddress") proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") proto.RegisterType((*PexResponse)(nil), "tendermint.p2p.PexResponse") - proto.RegisterType((*PexAddressV2)(nil), "tendermint.p2p.PexAddressV2") - proto.RegisterType((*PexRequestV2)(nil), "tendermint.p2p.PexRequestV2") - proto.RegisterType((*PexResponseV2)(nil), "tendermint.p2p.PexResponseV2") proto.RegisterType((*PexMessage)(nil), "tendermint.p2p.PexMessage") } func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } var fileDescriptor_81c2f011fd13be57 = []byte{ - // 407 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdd, 0x8a, 0xda, 0x40, - 0x14, 0xc7, 0xf3, 0x61, 0x2d, 0x9e, 0x44, 0x0b, 0x43, 0x29, 0xa9, 0x6d, 0xa3, 0xe4, 0xca, 0xde, - 0x24, 0x30, 0xa5, 0x97, 0x2d, 0x36, 0x08, 0xb5, 0x50, 0xa9, 0x1d, 0xd8, 0x5c, 0xec, 0x8d, 0xe8, - 0x66, 0xc8, 0x06, 0x56, 0x33, 0x9b, 0x49, 0x16, 0x1f, 0x63, 0xdf, 0x61, 0x5f, 0xc6, 0x4b, 0x2f, - 0xf7, 0x4a, 0x96, 0xf8, 0x22, 0x8b, 0x13, 0x31, 0x23, 0xba, 0x7b, 0x37, 0xe7, 0x7f, 0xbe, 0x7e, - 0xe7, 0xcc, 0x01, 0x2b, 0xa3, 0x8b, 0x90, 0xa6, 0xf3, 0x78, 0x91, 0x79, 0x0c, 0x33, 0x8f, 0xd1, - 
0xa5, 0xcb, 0xd2, 0x24, 0x4b, 0x50, 0xab, 0xf2, 0xb8, 0x0c, 0xb3, 0xf6, 0xfb, 0x28, 0x89, 0x12, - 0xe1, 0xf2, 0x76, 0xaf, 0x32, 0xca, 0x19, 0x03, 0x8c, 0xe9, 0xf2, 0x57, 0x18, 0xa6, 0x94, 0x73, - 0xf4, 0x01, 0xb4, 0x38, 0xb4, 0xd4, 0xae, 0xda, 0x6b, 0xf8, 0xf5, 0x62, 0xd3, 0xd1, 0xfe, 0x0c, - 0x88, 0x16, 0x87, 0x42, 0x67, 0x96, 0x26, 0xe9, 0x63, 0xa2, 0xc5, 0x0c, 0x21, 0xa8, 0xb1, 0x24, - 0xcd, 0x2c, 0xbd, 0xab, 0xf6, 0x9a, 0x44, 0xbc, 0x1d, 0x53, 0x54, 0x24, 0xf4, 0x36, 0xa7, 0x3c, - 0x73, 0x46, 0x60, 0x08, 0x8b, 0xb3, 0x64, 0xc1, 0x29, 0xfa, 0x09, 0x8d, 0x69, 0xd9, 0x8b, 0x72, - 0x4b, 0xed, 0xea, 0x3d, 0x03, 0xb7, 0xdd, 0x63, 0x50, 0xb7, 0xe2, 0xf1, 0x6b, 0xab, 0x4d, 0x47, - 0x21, 0x55, 0x8a, 0xf3, 0x15, 0xcc, 0xca, 0x1d, 0x60, 0xf4, 0x11, 0xf4, 0x3c, 0xbd, 0xd9, 0x13, - 0xbf, 0x2d, 0x36, 0x1d, 0xfd, 0x82, 0xfc, 0x25, 0x3b, 0xcd, 0x69, 0x89, 0xd0, 0x3d, 0x47, 0x80, - 0x9d, 0xff, 0xd0, 0x94, 0x48, 0x02, 0x8c, 0xfa, 0xa7, 0x2c, 0x9f, 0x5f, 0x66, 0x09, 0xf0, 0x29, - 0xcd, 0x83, 0x26, 0x66, 0x1d, 0x51, 0xce, 0xa7, 0x11, 0x45, 0x3f, 0xc0, 0x60, 0x74, 0x39, 0x49, - 0xcb, 0x96, 0x02, 0xea, 0xfc, 0x78, 0x7b, 0xa8, 0xa1, 0x42, 0x80, 0x1d, 0x2c, 0xd4, 0x07, 0xb3, - 0x4c, 0x2f, 0x09, 0xc5, 0xba, 0x0d, 0xfc, 0xe9, 0x6c, 0x7e, 0x19, 0x32, 0x54, 0x88, 0xc1, 0xa4, - 0xed, 0x0e, 0xa0, 0x25, 0x01, 0x4c, 0xee, 0xb0, 0xf8, 0x98, 0xf3, 0x63, 0x1d, 0x16, 0x33, 0x54, - 0x88, 0xc9, 0x24, 0x1b, 0xfd, 0x86, 0x77, 0x32, 0xc7, 0xae, 0x4c, 0x4d, 0x94, 0xf9, 0xf2, 0x0a, - 0x8a, 0xa8, 0xd3, 0x64, 0xb2, 0xe0, 0xbf, 0x01, 0x9d, 0xe7, 0x73, 0xff, 0xdf, 0xaa, 0xb0, 0xd5, - 0x75, 0x61, 0xab, 0x4f, 0x85, 0xad, 0xde, 0x6f, 0x6d, 0x65, 0xbd, 0xb5, 0x95, 0xc7, 0xad, 0xad, - 0x5c, 0x7e, 0x8f, 0xe2, 0xec, 0x3a, 0x9f, 0xb9, 0x57, 0xc9, 0xdc, 0x93, 0xee, 0x58, 0x3e, 0x69, - 0x71, 0xaf, 0xc7, 0x37, 0x3e, 0xab, 0x0b, 0xf5, 0xdb, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, - 0x9b, 0xfd, 0x75, 0xfc, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x89, 0xa4, 0xe7, + 0xa7, 0xe7, 0x83, 0xa5, 0xf4, 0x41, 0x2c, 0x88, 0x2a, 0x25, 0x63, 0x2e, 0xae, 0x80, 0xd4, 0x0a, + 0xc7, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x21, 0x49, 0x2e, 0xe6, 0xd2, 0xa2, 0x1c, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xf6, 0x47, 0xf7, 0xe4, 0x99, 0x43, 0x83, 0x7c, 0x82, 0x40, 0x62, + 0x5e, 0x2c, 0x1c, 0x4c, 0x02, 0xcc, 0x5e, 0x2c, 0x1c, 0xcc, 0x02, 0x2c, 0x4a, 0x3c, 0x60, 0x4d, + 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xbe, 0x5c, 0xdc, 0x60, 0x5e, 0x71, 0x41, 0x7e, + 0x5e, 0x71, 0xaa, 0x90, 0x1d, 0x17, 0x67, 0x22, 0xc4, 0xb8, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66, + 0x0d, 0x6e, 0x23, 0x29, 0x3d, 0x54, 0xb7, 0xe8, 0x21, 0xac, 0x74, 0x62, 0x39, 0x71, 0x4f, 0x9e, + 0x21, 0x08, 0xa1, 0x45, 0x69, 0x01, 0x23, 0xd8, 0x74, 0xdf, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, + 0x21, 0x5b, 0x2e, 0xee, 0x82, 0xd4, 0x8a, 0xf8, 0x22, 0x88, 0x65, 0x12, 0xcc, 0x0a, 0x8c, 0x38, + 0x0c, 0x84, 0x3a, 0xc7, 0x83, 0x21, 0x88, 0xab, 0x00, 0xce, 0x13, 0x72, 0xe0, 0xe2, 0x81, 0x68, + 0x87, 0xb8, 0x4e, 0x82, 0x05, 0xac, 0x5f, 0x1a, 0xab, 0x7e, 0x88, 0x12, 0x0f, 0x86, 0x20, 0xee, + 0x02, 0x04, 0xd7, 0x89, 0x95, 0x8b, 0xb9, 0xb8, 0x34, 0xd7, 0x8b, 0x85, 0x83, 0x51, 0x80, 0x09, + 0x12, 0x0a, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, + 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, + 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0x14, 0x33, 0xc8, 0x91, 0x04, + 0x8e, 0x01, 0xd4, 0x58, 0x4b, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa7, + 0x1d, 0xdd, 0x6f, 0xce, 0x01, 0x00, 0x00, } func (m *PexAddress) Marshal() (dAtA []byte, err error) { @@ -460,22 +285,10 @@ func (m *PexAddress) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { _ = i var l int _ = l - if m.Port != 0 { - i = encodeVarintPex(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintPex(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintPex(dAtA, i, uint64(len(m.ID))) + if len(m.URL) > 0 { + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa } @@ -542,96 +355,6 @@ func (m *PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PexAddressV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexAddressV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexAddressV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.URL) > 0 { - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PexRequestV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *PexResponseV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *PexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -681,7 +404,7 @@ func (m *PexMessage_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } return len(dAtA) - i, nil } @@ -702,48 +425,6 @@ func (m *PexMessage_PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexRequestV2 != nil { - { - size, err := m.PexRequestV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexResponseV2 != nil { - { - size, err := m.PexResponseV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintPex(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 } return len(dAtA) - i, nil @@ -765,17 +446,10 @@ func (m *PexAddress) Size() (n int) { } var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - l = len(m.IP) + l = len(m.URL) if l > 0 { n += 1 + l + sovPex(uint64(l)) } - if m.Port != 0 { - n += 1 + sovPex(uint64(m.Port)) - } return n } @@ -803,100 +477,39 @@ func (m *PexResponse) Size() (n int) { return n } -func (m *PexAddressV2) Size() (n int) { +func (m *PexMessage) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.URL) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) + if m.Sum != nil { + n += m.Sum.Size() } return n } -func (m *PexRequestV2) Size() (n int) { +func (m *PexMessage_PexRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.PexRequest != nil { + l = m.PexRequest.Size() + n += 1 + l + sovPex(uint64(l)) + } return n } - -func (m *PexResponseV2) Size() (n int) { +func (m *PexMessage_PexResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovPex(uint64(l)) - } - } - return n -} - -func (m *PexMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() - } - return n -} - -func (m *PexMessage_PexRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequest != nil { - l = m.PexRequest.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponse != nil { - l = m.PexResponse.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequestV2 != nil { - l = m.PexRequestV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m 
*PexMessage_PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponseV2 != nil { - l = m.PexResponseV2.Size() - n += 1 + l + sovPex(uint64(l)) + if m.PexResponse != nil { + l = m.PexResponse.Size() + n += 1 + l + sovPex(uint64(l)) } return n } @@ -938,39 +551,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -998,27 +579,8 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) @@ -1174,222 +736,6 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { } 
return nil } -func (m *PexAddressV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexAddressV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexAddressV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexRequestV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexRequestV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexRequestV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexResponseV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexResponseV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexResponseV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, PexAddressV2{}) - if err := 
m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PexMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1419,7 +765,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: PexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexRequest", wireType) } @@ -1454,7 +800,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexRequest{v} iNdEx = postIndex - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexResponse", wireType) } @@ -1489,76 +835,6 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexResponse{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexRequestV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexRequestV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexRequestV2{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field PexResponseV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexResponseV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexResponseV2{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) diff --git a/proto/tendermint/statesync/message_test.go b/proto/tendermint/statesync/message_test.go index cccd257666..9b2a41b624 100644 --- a/proto/tendermint/statesync/message_test.go +++ b/proto/tendermint/statesync/message_test.go @@ -7,9 +7,9 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) func TestValidateMsg(t *testing.T) { @@ -186,10 +186,26 @@ func TestStateSyncVectors(t *testing.T) { { "ParamsResponse", &ssproto.ParamsResponse{ - Height: 9001, - ConsensusParams: types.DefaultConsensusParams().ToProto(), + Height: 9001, + ConsensusParams: tmproto.ConsensusParams{ + Block: &tmproto.BlockParams{ + MaxBytes: 10, + MaxGas: 20, + }, + Evidence: &tmproto.EvidenceParams{ + MaxAgeNumBlocks: 10, + MaxAgeDuration: 300, + MaxBytes: 100, + }, + Validator: &tmproto.ValidatorParams{ + PubKeyTypes: []string{ed25519.KeyType}, + }, + Version: &tmproto.VersionParams{ + AppVersion: 11, + }, + }, }, - "423408a946122f0a10088080c00a10ffffffffffffffffff01120e08a08d0612040880c60a188080401a090a07656432353531392200", + 
"422508a94612200a04080a10141209080a120310ac0218641a090a07656432353531392202080b", }, } diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 5a9f103a9c..9116daca79 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -34,6 +34,7 @@ type ConsensusParams struct { Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Synchrony *SynchronyParams `protobuf:"bytes,5,opt,name=synchrony,proto3" json:"synchrony,omitempty"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } @@ -97,6 +98,13 @@ func (m *ConsensusParams) GetVersion() *VersionParams { return nil } +func (m *ConsensusParams) GetSynchrony() *SynchronyParams { + if m != nil { + return m.Synchrony + } + return nil +} + // BlockParams contains limits on the block size. type BlockParams struct { // Max block size, in bytes. @@ -373,6 +381,66 @@ func (m *HashedParams) GetBlockMaxGas() int64 { return 0 } +// SynchronyParams configure the bounds under which a proposed block's timestamp is considered valid. +// These parameters are part of the proposer-based timestamps algorithm. For more information, +// see the specification of proposer-based timestamps: +// https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp +type SynchronyParams struct { + // message_delay bounds how long a proposal message may take to reach all validators on a newtork + // and still be considered valid. + MessageDelay *time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay,omitempty"` + // precision bounds how skewed a proposer's clock may be from any validator + // on the network while still producing valid proposals. 
+ Precision *time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision,omitempty"` +} + +func (m *SynchronyParams) Reset() { *m = SynchronyParams{} } +func (m *SynchronyParams) String() string { return proto.CompactTextString(m) } +func (*SynchronyParams) ProtoMessage() {} +func (*SynchronyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{6} +} +func (m *SynchronyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SynchronyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SynchronyParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SynchronyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_SynchronyParams.Merge(m, src) +} +func (m *SynchronyParams) XXX_Size() int { + return m.Size() +} +func (m *SynchronyParams) XXX_DiscardUnknown() { + xxx_messageInfo_SynchronyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_SynchronyParams proto.InternalMessageInfo + +func (m *SynchronyParams) GetMessageDelay() *time.Duration { + if m != nil { + return m.MessageDelay + } + return nil +} + +func (m *SynchronyParams) GetPrecision() *time.Duration { + if m != nil { + return m.Precision + } + return nil +} + func init() { proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") @@ -380,44 +448,49 @@ func init() { proto.RegisterType((*ValidatorParams)(nil), "tendermint.types.ValidatorParams") proto.RegisterType((*VersionParams)(nil), "tendermint.types.VersionParams") proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams") + proto.RegisterType((*SynchronyParams)(nil), "tendermint.types.SynchronyParams") } func init() { proto.RegisterFile("tendermint/types/params.proto", 
fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xc1, 0x6a, 0xd4, 0x40, - 0x1c, 0xc6, 0x77, 0x9a, 0xda, 0xee, 0xfe, 0xe3, 0x76, 0xcb, 0x20, 0x18, 0x2b, 0xcd, 0xae, 0x39, - 0x48, 0x41, 0x48, 0xc4, 0x22, 0x22, 0x08, 0xe2, 0x56, 0xa9, 0x20, 0x15, 0x09, 0xea, 0xa1, 0x97, - 0x30, 0xd9, 0x8c, 0x69, 0xe8, 0x4e, 0x66, 0xc8, 0x24, 0xcb, 0xee, 0xcd, 0x47, 0xf0, 0xe8, 0x23, - 0xe8, 0x9b, 0xf4, 0xd8, 0xa3, 0x27, 0x95, 0xdd, 0x17, 0x91, 0x4c, 0x32, 0xa6, 0x9b, 0xf6, 0x36, - 0x33, 0xdf, 0xef, 0x9b, 0xe1, 0xfb, 0x86, 0x3f, 0xec, 0xe7, 0x34, 0x8d, 0x68, 0xc6, 0x92, 0x34, - 0xf7, 0xf2, 0x85, 0xa0, 0xd2, 0x13, 0x24, 0x23, 0x4c, 0xba, 0x22, 0xe3, 0x39, 0xc7, 0xbb, 0x8d, - 0xec, 0x2a, 0x79, 0xef, 0x4e, 0xcc, 0x63, 0xae, 0x44, 0xaf, 0x5c, 0x55, 0xdc, 0x9e, 0x1d, 0x73, - 0x1e, 0x4f, 0xa9, 0xa7, 0x76, 0x61, 0xf1, 0xc5, 0x8b, 0x8a, 0x8c, 0xe4, 0x09, 0x4f, 0x2b, 0xdd, - 0xf9, 0xba, 0x01, 0x83, 0x23, 0x9e, 0x4a, 0x9a, 0xca, 0x42, 0x7e, 0x50, 0x2f, 0xe0, 0x43, 0xb8, - 0x15, 0x4e, 0xf9, 0xe4, 0xdc, 0x42, 0x23, 0x74, 0x60, 0x3e, 0xd9, 0x77, 0xdb, 0x6f, 0xb9, 0xe3, - 0x52, 0xae, 0x68, 0xbf, 0x62, 0xf1, 0x0b, 0xe8, 0xd2, 0x59, 0x12, 0xd1, 0x74, 0x42, 0xad, 0x0d, - 0xe5, 0x1b, 0x5d, 0xf7, 0xbd, 0xa9, 0x89, 0xda, 0xfa, 0xdf, 0x81, 0x5f, 0x42, 0x6f, 0x46, 0xa6, - 0x49, 0x44, 0x72, 0x9e, 0x59, 0x86, 0xb2, 0x3f, 0xb8, 0x6e, 0xff, 0xac, 0x91, 0xda, 0xdf, 0x78, - 0xf0, 0x73, 0xd8, 0x9e, 0xd1, 0x4c, 0x26, 0x3c, 0xb5, 0x36, 0x95, 0x7d, 0x78, 0x83, 0xbd, 0x02, - 0x6a, 0xb3, 0xe6, 0x9d, 0x23, 0x30, 0xaf, 0xe4, 0xc1, 0xf7, 0xa1, 0xc7, 0xc8, 0x3c, 0x08, 0x17, - 0x39, 0x95, 0xaa, 0x01, 0xc3, 0xef, 0x32, 0x32, 0x1f, 0x97, 0x7b, 0x7c, 0x17, 0xb6, 0x4b, 0x31, - 0x26, 0x52, 0x85, 0x34, 0xfc, 0x2d, 0x46, 0xe6, 0xc7, 0x44, 0x3a, 0x3f, 0x11, 0xec, 0xac, 0xa7, - 0xc3, 0x8f, 0x00, 0x97, 0x2c, 0x89, 0x69, 0x90, 0x16, 0x2c, 0x50, 0x35, 0xe9, 0x1b, 0x07, 0x8c, - 0xcc, 
0x5f, 0xc5, 0xf4, 0x7d, 0xc1, 0xd4, 0xd3, 0x12, 0x9f, 0xc0, 0xae, 0x86, 0xf5, 0x0f, 0xd5, - 0x35, 0xde, 0x73, 0xab, 0x2f, 0x74, 0xf5, 0x17, 0xba, 0xaf, 0x6b, 0x60, 0xdc, 0xbd, 0xf8, 0x3d, - 0xec, 0x7c, 0xff, 0x33, 0x44, 0xfe, 0x4e, 0x75, 0x9f, 0x56, 0xd6, 0x43, 0x18, 0xeb, 0x21, 0x9c, - 0xa7, 0x30, 0x68, 0x35, 0x89, 0x1d, 0xe8, 0x8b, 0x22, 0x0c, 0xce, 0xe9, 0x22, 0x50, 0x5d, 0x59, - 0x68, 0x64, 0x1c, 0xf4, 0x7c, 0x53, 0x14, 0xe1, 0x3b, 0xba, 0xf8, 0x58, 0x1e, 0x39, 0x8f, 0xa1, - 0xbf, 0xd6, 0x20, 0x1e, 0x82, 0x49, 0x84, 0x08, 0x74, 0xef, 0x65, 0xb2, 0x4d, 0x1f, 0x88, 0x10, - 0x35, 0xe6, 0x9c, 0xc2, 0xed, 0xb7, 0x44, 0x9e, 0xd1, 0xa8, 0x36, 0x3c, 0x84, 0x81, 0x6a, 0x21, - 0x68, 0x17, 0xdc, 0x57, 0xc7, 0x27, 0xba, 0x65, 0x07, 0xfa, 0x0d, 0xd7, 0x74, 0x6d, 0x6a, 0xea, - 0x98, 0xc8, 0xf1, 0xa7, 0x1f, 0x4b, 0x1b, 0x5d, 0x2c, 0x6d, 0x74, 0xb9, 0xb4, 0xd1, 0xdf, 0xa5, - 0x8d, 0xbe, 0xad, 0xec, 0xce, 0xe5, 0xca, 0xee, 0xfc, 0x5a, 0xd9, 0x9d, 0xd3, 0x67, 0x71, 0x92, - 0x9f, 0x15, 0xa1, 0x3b, 0xe1, 0xcc, 0xbb, 0x3a, 0x48, 0xcd, 0xb2, 0x9a, 0x94, 0xf6, 0x90, 0x85, - 0x5b, 0xea, 0xfc, 0xf0, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0x54, 0x4f, 0xe1, 0x7f, 0x03, - 0x00, 0x00, + // 565 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4d, 0x8b, 0xd3, 0x40, + 0x18, 0xc7, 0x9b, 0xed, 0xbe, 0xb4, 0x4f, 0xb7, 0xdb, 0x65, 0x10, 0x8c, 0x2b, 0x9b, 0xd6, 0x1c, + 0x64, 0x41, 0x48, 0xc4, 0x45, 0x44, 0x50, 0xc4, 0x6e, 0x45, 0x41, 0x56, 0x24, 0xbe, 0x1c, 0xf6, + 0x12, 0x26, 0xed, 0x98, 0x86, 0x6d, 0x32, 0x43, 0x26, 0x29, 0xcd, 0xb7, 0xf0, 0x24, 0x7e, 0x04, + 0xfd, 0x18, 0xde, 0xf6, 0xb8, 0x47, 0x4f, 0x2a, 0xed, 0x17, 0x91, 0x99, 0xcc, 0x6c, 0xb6, 0x5d, + 0x15, 0x6f, 0xc9, 0x3c, 0xff, 0xdf, 0x3c, 0xcc, 0xef, 0x49, 0x06, 0xf6, 0x33, 0x92, 0x8c, 0x48, + 0x1a, 0x47, 0x49, 0xe6, 0x66, 0x05, 0x23, 0xdc, 0x65, 0x38, 0xc5, 0x31, 0x77, 0x58, 0x4a, 0x33, + 0x8a, 0x76, 0xab, 0xb2, 0x23, 0xcb, 0x7b, 0xd7, 0x42, 0x1a, 0x52, 0x59, 0x74, 
0xc5, 0x53, 0x99, + 0xdb, 0xb3, 0x42, 0x4a, 0xc3, 0x09, 0x71, 0xe5, 0x5b, 0x90, 0x7f, 0x70, 0x47, 0x79, 0x8a, 0xb3, + 0x88, 0x26, 0x65, 0xdd, 0xfe, 0xb6, 0x06, 0x9d, 0x23, 0x9a, 0x70, 0x92, 0xf0, 0x9c, 0xbf, 0x96, + 0x1d, 0xd0, 0x21, 0x6c, 0x04, 0x13, 0x3a, 0x3c, 0x35, 0x8d, 0x9e, 0x71, 0xd0, 0xba, 0xb7, 0xef, + 0xac, 0xf6, 0x72, 0xfa, 0xa2, 0x5c, 0xa6, 0xbd, 0x32, 0x8b, 0x1e, 0x41, 0x83, 0x4c, 0xa3, 0x11, + 0x49, 0x86, 0xc4, 0x5c, 0x93, 0x5c, 0xef, 0x2a, 0xf7, 0x4c, 0x25, 0x14, 0x7a, 0x41, 0xa0, 0x27, + 0xd0, 0x9c, 0xe2, 0x49, 0x34, 0xc2, 0x19, 0x4d, 0xcd, 0xba, 0xc4, 0x6f, 0x5d, 0xc5, 0xdf, 0xeb, + 0x88, 0xe2, 0x2b, 0x06, 0x3d, 0x84, 0xad, 0x29, 0x49, 0x79, 0x44, 0x13, 0x73, 0x5d, 0xe2, 0xdd, + 0x3f, 0xe0, 0x65, 0x40, 0xc1, 0x3a, 0x2f, 0x7a, 0xf3, 0x22, 0x19, 0x8e, 0x53, 0x9a, 0x14, 0xe6, + 0xc6, 0xdf, 0x7a, 0xbf, 0xd1, 0x11, 0xdd, 0xfb, 0x82, 0xb1, 0x8f, 0xa0, 0x75, 0x49, 0x08, 0xba, + 0x09, 0xcd, 0x18, 0xcf, 0xfc, 0xa0, 0xc8, 0x08, 0x97, 0x0a, 0xeb, 0x5e, 0x23, 0xc6, 0xb3, 0xbe, + 0x78, 0x47, 0xd7, 0x61, 0x4b, 0x14, 0x43, 0xcc, 0xa5, 0xa5, 0xba, 0xb7, 0x19, 0xe3, 0xd9, 0x73, + 0xcc, 0xed, 0xaf, 0x06, 0xec, 0x2c, 0xeb, 0x41, 0x77, 0x00, 0x89, 0x2c, 0x0e, 0x89, 0x9f, 0xe4, + 0xb1, 0x2f, 0x3d, 0xeb, 0x1d, 0x3b, 0x31, 0x9e, 0x3d, 0x0d, 0xc9, 0xab, 0x3c, 0x96, 0xad, 0x39, + 0x3a, 0x86, 0x5d, 0x1d, 0xd6, 0x23, 0x56, 0x73, 0xb8, 0xe1, 0x94, 0xdf, 0x80, 0xa3, 0xbf, 0x01, + 0x67, 0xa0, 0x02, 0xfd, 0xc6, 0xd9, 0x8f, 0x6e, 0xed, 0xf3, 0xcf, 0xae, 0xe1, 0xed, 0x94, 0xfb, + 0xe9, 0xca, 0xf2, 0x21, 0xea, 0xcb, 0x87, 0xb0, 0xef, 0x43, 0x67, 0x65, 0x14, 0xc8, 0x86, 0x36, + 0xcb, 0x03, 0xff, 0x94, 0x14, 0xbe, 0xf4, 0x65, 0x1a, 0xbd, 0xfa, 0x41, 0xd3, 0x6b, 0xb1, 0x3c, + 0x78, 0x49, 0x8a, 0xb7, 0x62, 0xc9, 0xbe, 0x0b, 0xed, 0xa5, 0x11, 0xa0, 0x2e, 0xb4, 0x30, 0x63, + 0xbe, 0x1e, 0x9c, 0x38, 0xd9, 0xba, 0x07, 0x98, 0x31, 0x15, 0xb3, 0x4f, 0x60, 0xfb, 0x05, 0xe6, + 0x63, 0x32, 0x52, 0xc0, 0x6d, 0xe8, 0x48, 0x0b, 0xfe, 0xaa, 0xe0, 0xb6, 0x5c, 0x3e, 0xd6, 0x96, + 0x6d, 0x68, 0x57, 
0xb9, 0xca, 0x75, 0x4b, 0xa7, 0x84, 0xf0, 0x4f, 0x06, 0x74, 0x56, 0x86, 0x8a, + 0x06, 0xd0, 0x8e, 0x09, 0xe7, 0x52, 0x22, 0x99, 0xe0, 0x42, 0xfd, 0x01, 0xff, 0x30, 0xb8, 0x2e, + 0xed, 0x6d, 0x2b, 0x6a, 0x20, 0x20, 0xf4, 0x18, 0x9a, 0x2c, 0x25, 0xc3, 0x88, 0xff, 0xd7, 0x0c, + 0xca, 0x1d, 0x2a, 0xa2, 0xff, 0xee, 0xcb, 0xdc, 0x32, 0xce, 0xe6, 0x96, 0x71, 0x3e, 0xb7, 0x8c, + 0x5f, 0x73, 0xcb, 0xf8, 0xb8, 0xb0, 0x6a, 0xe7, 0x0b, 0xab, 0xf6, 0x7d, 0x61, 0xd5, 0x4e, 0x1e, + 0x84, 0x51, 0x36, 0xce, 0x03, 0x67, 0x48, 0x63, 0xf7, 0xf2, 0x15, 0x51, 0x3d, 0x96, 0x77, 0xc0, + 0xea, 0xf5, 0x11, 0x6c, 0xca, 0xf5, 0xc3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x57, 0x89, 0x7c, + 0xd9, 0x59, 0x04, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -451,6 +524,9 @@ func (this *ConsensusParams) Equal(that interface{}) bool { if !this.Version.Equal(that1.Version) { return false } + if !this.Synchrony.Equal(that1.Synchrony) { + return false + } return true } func (this *BlockParams) Equal(that interface{}) bool { @@ -590,6 +666,45 @@ func (this *HashedParams) Equal(that interface{}) bool { } return true } +func (this *SynchronyParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SynchronyParams) + if !ok { + that2, ok := that.(SynchronyParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MessageDelay != nil && that1.MessageDelay != nil { + if *this.MessageDelay != *that1.MessageDelay { + return false + } + } else if this.MessageDelay != nil { + return false + } else if that1.MessageDelay != nil { + return false + } + if this.Precision != nil && that1.Precision != nil { + if *this.Precision != *that1.Precision { + return false + } + } else if this.Precision != nil { + return false + } else if that1.Precision != nil { + return false + } + return true +} func (m *ConsensusParams) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) @@ -610,6 +725,18 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Synchrony != nil { + { + size, err := m.Synchrony.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.Version != nil { { size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) @@ -719,12 +846,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err5 != nil { - return 0, err5 + n6, err6 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err6 != nil { + return 0, err6 } - i -= n5 - i = encodeVarintParams(dAtA, i, uint64(n5)) + i -= n6 + i = encodeVarintParams(dAtA, i, uint64(n6)) i-- dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { @@ -828,6 +955,49 @@ func (m *HashedParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SynchronyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SynchronyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Precision != nil { + n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):]) + if err7 != nil { + return 0, err7 + } + i -= n7 + i = 
encodeVarintParams(dAtA, i, uint64(n7)) + i-- + dAtA[i] = 0x12 + } + if m.MessageDelay != nil { + n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintParams(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintParams(dAtA []byte, offset int, v uint64) int { offset -= sovParams(v) base := offset @@ -861,6 +1031,10 @@ func (m *ConsensusParams) Size() (n int) { l = m.Version.Size() n += 1 + l + sovParams(uint64(l)) } + if m.Synchrony != nil { + l = m.Synchrony.Size() + n += 1 + l + sovParams(uint64(l)) + } return n } @@ -938,6 +1112,23 @@ func (m *HashedParams) Size() (n int) { return n } +func (m *SynchronyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MessageDelay != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay) + n += 1 + l + sovParams(uint64(l)) + } + if m.Precision != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision) + n += 1 + l + sovParams(uint64(l)) + } + return n +} + func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1117,6 +1308,42 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Synchrony", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Synchrony == nil { + m.Synchrony = 
&SynchronyParams{} + } + if err := m.Synchrony.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) @@ -1586,6 +1813,128 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { } return nil } +func (m *SynchronyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SynchronyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SynchronyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MessageDelay == nil { + m.MessageDelay = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Precision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Precision == nil { + m.Precision = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Precision, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index d15c6f2163..671b520430 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -978,14 +978,15 @@ func (m *DataAvailabilityHeader) GetColumnRoots() [][]byte { // Vote represents a prevote, precommit, or commit vote from validators for // consensus. 
type Vote struct { - Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` - BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` - Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - ValidatorAddress []byte `protobuf:"bytes,6,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` - ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` - Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ValidatorAddress []byte `protobuf:"bytes,6,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` + Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` + VoteExtension *VoteExtension `protobuf:"bytes,9,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } func (m *Vote) Reset() { *m = Vote{} } @@ -1077,6 +1078,112 @@ func (m *Vote) GetSignature() []byte { return nil } +func (m *Vote) GetVoteExtension() *VoteExtension { + if m 
!= nil { + return m.VoteExtension + } + return nil +} + +// VoteExtension is app-defined additional information to the validator votes. +type VoteExtension struct { + AppDataToSign []byte `protobuf:"bytes,1,opt,name=app_data_to_sign,json=appDataToSign,proto3" json:"app_data_to_sign,omitempty"` + AppDataSelfAuthenticating []byte `protobuf:"bytes,2,opt,name=app_data_self_authenticating,json=appDataSelfAuthenticating,proto3" json:"app_data_self_authenticating,omitempty"` +} + +func (m *VoteExtension) Reset() { *m = VoteExtension{} } +func (m *VoteExtension) String() string { return proto.CompactTextString(m) } +func (*VoteExtension) ProtoMessage() {} +func (*VoteExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{14} +} +func (m *VoteExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteExtension.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteExtension.Merge(m, src) +} +func (m *VoteExtension) XXX_Size() int { + return m.Size() +} +func (m *VoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_VoteExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteExtension proto.InternalMessageInfo + +func (m *VoteExtension) GetAppDataToSign() []byte { + if m != nil { + return m.AppDataToSign + } + return nil +} + +func (m *VoteExtension) GetAppDataSelfAuthenticating() []byte { + if m != nil { + return m.AppDataSelfAuthenticating + } + return nil +} + +// VoteExtensionToSign is a subset of VoteExtension that is signed by the validators private key. +// VoteExtensionToSign is extracted from an existing VoteExtension. 
+type VoteExtensionToSign struct { + AppDataToSign []byte `protobuf:"bytes,1,opt,name=app_data_to_sign,json=appDataToSign,proto3" json:"app_data_to_sign,omitempty"` +} + +func (m *VoteExtensionToSign) Reset() { *m = VoteExtensionToSign{} } +func (m *VoteExtensionToSign) String() string { return proto.CompactTextString(m) } +func (*VoteExtensionToSign) ProtoMessage() {} +func (*VoteExtensionToSign) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{15} +} +func (m *VoteExtensionToSign) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteExtensionToSign) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteExtensionToSign.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteExtensionToSign) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteExtensionToSign.Merge(m, src) +} +func (m *VoteExtensionToSign) XXX_Size() int { + return m.Size() +} +func (m *VoteExtensionToSign) XXX_DiscardUnknown() { + xxx_messageInfo_VoteExtensionToSign.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteExtensionToSign proto.InternalMessageInfo + +func (m *VoteExtensionToSign) GetAppDataToSign() []byte { + if m != nil { + return m.AppDataToSign + } + return nil +} + // Commit contains the evidence that a block was committed by a set of validators. 
type Commit struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` @@ -1089,7 +1196,7 @@ func (m *Commit) Reset() { *m = Commit{} } func (m *Commit) String() string { return proto.CompactTextString(m) } func (*Commit) ProtoMessage() {} func (*Commit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{14} + return fileDescriptor_d3a6e55e2345de56, []int{16} } func (m *Commit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1148,17 +1255,18 @@ func (m *Commit) GetSignatures() []CommitSig { // CommitSig is a part of the Vote included in a Commit. type CommitSig struct { - BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` - ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` - Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` + BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` + VoteExtension *VoteExtensionToSign `protobuf:"bytes,5,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } func (m *CommitSig) Reset() { *m = CommitSig{} } func (m *CommitSig) String() string { return proto.CompactTextString(m) } func (*CommitSig) ProtoMessage() {} func (*CommitSig) Descriptor() ([]byte, []int) { - return 
fileDescriptor_d3a6e55e2345de56, []int{15} + return fileDescriptor_d3a6e55e2345de56, []int{17} } func (m *CommitSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1215,6 +1323,13 @@ func (m *CommitSig) GetSignature() []byte { return nil } +func (m *CommitSig) GetVoteExtension() *VoteExtensionToSign { + if m != nil { + return m.VoteExtension + } + return nil +} + type Proposal struct { Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` @@ -1229,7 +1344,7 @@ func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{16} + return fileDescriptor_d3a6e55e2345de56, []int{18} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1316,7 +1431,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } func (m *SignedHeader) String() string { return proto.CompactTextString(m) } func (*SignedHeader) ProtoMessage() {} func (*SignedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{17} + return fileDescriptor_d3a6e55e2345de56, []int{19} } func (m *SignedHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1483,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } func (m *LightBlock) String() string { return proto.CompactTextString(m) } func (*LightBlock) ProtoMessage() {} func (*LightBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{18} + return fileDescriptor_d3a6e55e2345de56, []int{20} } func (m *LightBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1422,7 +1537,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} } func (m *BlockMeta) String() string { return proto.CompactTextString(m) } func 
(*BlockMeta) ProtoMessage() {} func (*BlockMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{19} + return fileDescriptor_d3a6e55e2345de56, []int{21} } func (m *BlockMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1490,7 +1605,7 @@ func (m *TxProof) Reset() { *m = TxProof{} } func (m *TxProof) String() string { return proto.CompactTextString(m) } func (*TxProof) ProtoMessage() {} func (*TxProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{20} + return fileDescriptor_d3a6e55e2345de56, []int{22} } func (m *TxProof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1557,6 +1672,8 @@ func init() { proto.RegisterType((*Message)(nil), "tendermint.types.Message") proto.RegisterType((*DataAvailabilityHeader)(nil), "tendermint.types.DataAvailabilityHeader") proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") + proto.RegisterType((*VoteExtension)(nil), "tendermint.types.VoteExtension") + proto.RegisterType((*VoteExtensionToSign)(nil), "tendermint.types.VoteExtensionToSign") proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") @@ -1569,119 +1686,126 @@ func init() { func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0xdb, 0xd8, - 0x15, 0x36, 0x25, 0xd9, 0x92, 0x8e, 0x24, 0x5b, 0xbe, 0x75, 0x1c, 0x59, 0x49, 0x64, 0x95, 0x7d, - 0x8c, 0xe7, 0x51, 0x39, 0xcd, 0x14, 0x7d, 0x00, 0xed, 0x60, 0x24, 0xdb, 0x13, 0xab, 0xe3, 0x87, - 0x4a, 0x79, 0xd2, 0xc7, 0x86, 0xb8, 0x12, 0x6f, 0x24, 0x36, 0x14, 0x2f, 0xc1, 0x7b, 0xe5, 0xd8, - 0x59, 0x76, 0x55, 0x78, 0x95, 0x55, 0x77, 0x5e, 0xb5, 0x8b, 0xee, 0xfb, 0x07, 0x8a, 0xae, 0x66, 
- 0x53, 0x60, 0x76, 0xed, 0xa6, 0xd3, 0x22, 0x29, 0x8a, 0xfe, 0x8c, 0xe2, 0x3e, 0x48, 0x51, 0x96, - 0x94, 0x06, 0x41, 0x30, 0x1b, 0x81, 0x3c, 0xe7, 0x3b, 0xf7, 0x9e, 0xf7, 0x39, 0x14, 0xdc, 0xe5, - 0xc4, 0x77, 0x48, 0x38, 0x72, 0x7d, 0xbe, 0xcb, 0x2f, 0x03, 0xc2, 0xd4, 0x6f, 0x23, 0x08, 0x29, - 0xa7, 0xa8, 0x3c, 0xe1, 0x36, 0x24, 0xbd, 0xba, 0x31, 0xa0, 0x03, 0x2a, 0x99, 0xbb, 0xe2, 0x49, - 0xe1, 0xaa, 0xdb, 0x03, 0x4a, 0x07, 0x1e, 0xd9, 0x95, 0x6f, 0xbd, 0xf1, 0xe3, 0x5d, 0xee, 0x8e, - 0x08, 0xe3, 0x78, 0x14, 0x68, 0xc0, 0xbd, 0xc4, 0x35, 0xfd, 0xf0, 0x32, 0xe0, 0x54, 0x60, 0xe9, - 0x63, 0xcd, 0xae, 0x25, 0xd8, 0xe7, 0x24, 0x64, 0x2e, 0xf5, 0x93, 0x7a, 0x54, 0xeb, 0x33, 0x5a, - 0x9e, 0x63, 0xcf, 0x75, 0x30, 0xa7, 0xa1, 0x42, 0x98, 0x3f, 0x82, 0x52, 0x07, 0x87, 0xbc, 0x4b, - 0xf8, 0x21, 0xc1, 0x0e, 0x09, 0xd1, 0x06, 0x2c, 0x73, 0xca, 0xb1, 0x57, 0x31, 0xea, 0xc6, 0x4e, - 0xc9, 0x52, 0x2f, 0x08, 0x41, 0x66, 0x88, 0xd9, 0xb0, 0x92, 0xaa, 0x1b, 0x3b, 0x45, 0x4b, 0x3e, - 0x9b, 0x43, 0xc8, 0x08, 0x51, 0x21, 0xe1, 0xfa, 0x0e, 0xb9, 0x88, 0x24, 0xe4, 0x8b, 0xa0, 0xf6, - 0x2e, 0x39, 0x61, 0x5a, 0x44, 0xbd, 0xa0, 0xef, 0xc1, 0xb2, 0xd4, 0xbf, 0x92, 0xae, 0x1b, 0x3b, - 0x85, 0x07, 0x95, 0x46, 0xc2, 0x51, 0xca, 0xbe, 0x46, 0x47, 0xf0, 0x5b, 0x99, 0xcf, 0xbf, 0xdc, - 0x5e, 0xb2, 0x14, 0xd8, 0xf4, 0x20, 0xdb, 0xf2, 0x68, 0xff, 0x49, 0x7b, 0x3f, 0x56, 0xc4, 0x98, - 0x28, 0x82, 0x8e, 0x61, 0x2d, 0xc0, 0x21, 0xb7, 0x19, 0xe1, 0xf6, 0x50, 0x5a, 0x21, 0x2f, 0x2d, - 0x3c, 0xd8, 0x6e, 0xdc, 0x8c, 0x43, 0x63, 0xca, 0x58, 0x7d, 0x4b, 0x29, 0x48, 0x12, 0xcd, 0xff, - 0x64, 0x60, 0x45, 0x3b, 0xe3, 0x27, 0x90, 0xd5, 0x6e, 0x95, 0x17, 0x16, 0x1e, 0xdc, 0x4b, 0x9e, - 0xa8, 0x59, 0x8d, 0x3d, 0xea, 0x33, 0xe2, 0xb3, 0x31, 0xd3, 0xe7, 0x45, 0x32, 0xe8, 0xdb, 0x90, - 0xeb, 0x0f, 0xb1, 0xeb, 0xdb, 0xae, 0x23, 0x35, 0xca, 0xb7, 0x0a, 0x2f, 0xbe, 0xdc, 0xce, 0xee, - 0x09, 0x5a, 0x7b, 0xdf, 0xca, 0x4a, 0x66, 0xdb, 0x41, 0x9b, 0xb0, 0x32, 0x24, 0xee, 0x60, 0xc8, - 0xa5, 0x5b, 0xd2, 0x96, 0x7e, 0x43, 
0x3f, 0x84, 0x8c, 0x48, 0x88, 0x4a, 0x46, 0xde, 0x5d, 0x6d, - 0xa8, 0x6c, 0x69, 0x44, 0xd9, 0xd2, 0x38, 0x8b, 0xb2, 0xa5, 0x95, 0x13, 0x17, 0x3f, 0xff, 0xe7, - 0xb6, 0x61, 0x49, 0x09, 0xb4, 0x07, 0x25, 0x0f, 0x33, 0x6e, 0xf7, 0x84, 0xdb, 0xc4, 0xf5, 0xcb, - 0xf2, 0x88, 0xad, 0x59, 0x87, 0x68, 0xc7, 0x6a, 0xd5, 0x0b, 0x42, 0x4a, 0x91, 0x1c, 0xb4, 0x03, - 0x65, 0x79, 0x48, 0x9f, 0x8e, 0x46, 0x2e, 0xb7, 0xa5, 0xdf, 0x57, 0xa4, 0xdf, 0x57, 0x05, 0x7d, - 0x4f, 0x92, 0x0f, 0x45, 0x04, 0xee, 0x40, 0xde, 0xc1, 0x1c, 0x2b, 0x48, 0x56, 0x42, 0x72, 0x82, - 0x20, 0x99, 0xef, 0xc0, 0x5a, 0x9c, 0x75, 0x4c, 0x41, 0x72, 0xea, 0x94, 0x09, 0x59, 0x02, 0xef, - 0xc3, 0x86, 0x4f, 0x2e, 0xb8, 0x7d, 0x13, 0x9d, 0x97, 0x68, 0x24, 0x78, 0x8f, 0xa6, 0x25, 0xbe, - 0x05, 0xab, 0xfd, 0xc8, 0xf9, 0x0a, 0x0b, 0x12, 0x5b, 0x8a, 0xa9, 0x12, 0xb6, 0x05, 0x39, 0x1c, - 0x04, 0x0a, 0x50, 0x90, 0x80, 0x2c, 0x0e, 0x02, 0xc9, 0x7a, 0x0f, 0xd6, 0xa5, 0x8d, 0x21, 0x61, - 0x63, 0x8f, 0xeb, 0x43, 0x8a, 0x12, 0xb3, 0x26, 0x18, 0x96, 0xa2, 0x4b, 0xec, 0x37, 0xa0, 0x44, - 0xce, 0x5d, 0x87, 0xf8, 0x7d, 0xa2, 0x70, 0x25, 0x89, 0x2b, 0x46, 0x44, 0x09, 0x7a, 0x17, 0xca, - 0x41, 0x48, 0x03, 0xca, 0x48, 0x68, 0x63, 0xc7, 0x09, 0x09, 0x63, 0x95, 0x55, 0x75, 0x5e, 0x44, - 0x6f, 0x2a, 0xb2, 0xf9, 0x9b, 0x14, 0x64, 0xf6, 0x31, 0xc7, 0xa8, 0x0c, 0x69, 0x7e, 0xc1, 0x2a, - 0x46, 0x3d, 0xbd, 0x53, 0xb4, 0xc4, 0x23, 0x1a, 0x42, 0xc5, 0xf5, 0x39, 0x09, 0x47, 0xc4, 0x71, - 0x31, 0x27, 0x36, 0xe3, 0xe2, 0x37, 0xa4, 0x94, 0x33, 0x9d, 0xdb, 0x3b, 0xb3, 0xa1, 0x6c, 0x27, - 0x24, 0xba, 0x42, 0xc0, 0x12, 0x78, 0x1d, 0xd9, 0x4d, 0x77, 0x2e, 0x17, 0x7d, 0x0c, 0xb9, 0x48, - 0x7f, 0x5d, 0x94, 0xb5, 0xd9, 0x93, 0x0f, 0x34, 0xe2, 0xc8, 0x65, 0x5c, 0x9f, 0x17, 0x4b, 0xa1, - 0x1f, 0x43, 0x6e, 0x44, 0x18, 0xc3, 0x03, 0xc2, 0xe2, 0x4c, 0x9d, 0x39, 0xe1, 0x58, 0x23, 0x22, - 0xe9, 0x48, 0xc2, 0x7c, 0x9e, 0x82, 0x5b, 0xfb, 0xe3, 0xc0, 0x73, 0xfb, 0x98, 0x93, 0x47, 0x94, - 0x93, 0xe8, 0x2e, 0xf4, 0x1d, 0x58, 0x39, 0xa7, 0x9c, 0xd8, 0x58, 0xd7, 
0xde, 0xe6, 0xec, 0xa9, - 0x02, 0x6f, 0x2d, 0x0b, 0x54, 0x33, 0x86, 0xf7, 0xb4, 0x83, 0x5e, 0x09, 0x6f, 0xa1, 0x0f, 0x00, - 0xc9, 0xd6, 0x66, 0x9f, 0x53, 0xee, 0xfa, 0x03, 0x3b, 0xa0, 0x4f, 0x49, 0xa8, 0xeb, 0xaf, 0x2c, - 0x39, 0x8f, 0x24, 0xa3, 0x23, 0xe8, 0x53, 0x39, 0xac, 0xa1, 0x19, 0x09, 0x9d, 0xe4, 0xb0, 0x02, - 0xb6, 0x20, 0x1f, 0xf7, 0x70, 0x5d, 0x74, 0xaf, 0x57, 0xb7, 0x13, 0x31, 0xf3, 0xaf, 0x29, 0xd8, - 0x3a, 0x12, 0x0d, 0x60, 0xcf, 0x73, 0x89, 0xcf, 0x9b, 0x9c, 0xe3, 0xfe, 0x93, 0xd8, 0x2d, 0x6d, - 0x58, 0xef, 0x53, 0xff, 0xb1, 0xe7, 0xf6, 0xa5, 0xde, 0xb2, 0xc2, 0xb5, 0x87, 0xee, 0xce, 0x9a, - 0x2c, 0xcf, 0x91, 0x05, 0x6d, 0x95, 0x13, 0x62, 0x92, 0x22, 0x12, 0x5a, 0xd4, 0x36, 0xf5, 0x6d, - 0xdd, 0x7e, 0x52, 0xd2, 0xa6, 0xa2, 0x22, 0x1e, 0xaa, 0x26, 0x74, 0x02, 0x1b, 0xbd, 0xcb, 0x67, - 0xd8, 0xe7, 0xae, 0x4f, 0x12, 0xa5, 0x59, 0x49, 0xd7, 0xd3, 0x3b, 0x85, 0x07, 0x77, 0xe6, 0x78, - 0x39, 0xc2, 0x58, 0x5f, 0x8b, 0x05, 0x27, 0x75, 0xbb, 0xc0, 0xf1, 0x99, 0x05, 0x8e, 0x7f, 0x1b, - 0xfe, 0xfc, 0xb7, 0x01, 0xb9, 0xd8, 0x7d, 0x18, 0x6e, 0x3b, 0x51, 0xba, 0xd9, 0x32, 0x61, 0xe2, - 0xf4, 0x57, 0x4e, 0x7c, 0x67, 0xd6, 0xa2, 0xb9, 0xf9, 0x79, 0xb8, 0x64, 0xdd, 0x72, 0xe6, 0x26, - 0xae, 0x0f, 0x77, 0x3d, 0xe1, 0x3a, 0xbb, 0x2f, 0xe3, 0x67, 0x63, 0x19, 0xc0, 0xc9, 0x3d, 0x2a, - 0x3f, 0xdf, 0x5f, 0x10, 0xac, 0x79, 0x41, 0x3f, 0x5c, 0xb2, 0xb6, 0xbc, 0x45, 0xcc, 0xd6, 0x32, - 0xa4, 0xd9, 0x78, 0x64, 0x1e, 0x41, 0x31, 0x59, 0xa7, 0xa2, 0x2e, 0x13, 0xa6, 0xa5, 0xe7, 0xd7, - 0x65, 0x7c, 0xc8, 0x8d, 0xaa, 0x36, 0x3f, 0x82, 0xcd, 0xf9, 0xfd, 0x04, 0x7d, 0x13, 0x56, 0x43, - 0xfc, 0x54, 0x35, 0x23, 0xdb, 0x73, 0x19, 0xd7, 0x8d, 0xab, 0x18, 0xe2, 0xa7, 0x12, 0x21, 0x6e, - 0x37, 0x7f, 0x0a, 0xb9, 0xa8, 0xe6, 0xd1, 0x47, 0x50, 0x8a, 0xea, 0x7d, 0x22, 0x30, 0x77, 0x1a, - 0x69, 0x11, 0xab, 0x18, 0xe1, 0xe5, 0x59, 0x1f, 0x43, 0x56, 0x33, 0xd0, 0xd7, 0xa1, 0xe8, 0xe3, - 0x11, 0x61, 0x01, 0xee, 0x13, 0x31, 0xd7, 0xd4, 0x1e, 0x50, 0x88, 0x69, 0x6d, 0x47, 0xac, 0x08, - 0x62, 0xf6, 
0x44, 0xbb, 0x8a, 0x78, 0x36, 0x7f, 0x01, 0x9b, 0xa2, 0xd3, 0x36, 0xcf, 0xb1, 0xeb, - 0xe1, 0x9e, 0xeb, 0xb9, 0xfc, 0x52, 0x8f, 0xf8, 0x3b, 0x90, 0x0f, 0xa9, 0xb6, 0x46, 0x1b, 0x92, - 0x0b, 0xa9, 0x32, 0x44, 0xdc, 0xd6, 0xa7, 0xde, 0x78, 0xe4, 0xc7, 0xad, 0x57, 0xf0, 0x0b, 0x8a, - 0x26, 0x21, 0xe6, 0x7f, 0x53, 0x90, 0x11, 0xd1, 0x47, 0x1f, 0x42, 0x46, 0xd8, 0x20, 0x35, 0x5a, - 0x9d, 0xb7, 0x7a, 0x74, 0xdd, 0x81, 0x4f, 0x9c, 0x63, 0x36, 0x38, 0xbb, 0x0c, 0x88, 0x25, 0xc1, - 0x89, 0xc9, 0x9f, 0x9a, 0x9a, 0xfc, 0x1b, 0xb0, 0x1c, 0xd2, 0xb1, 0xef, 0xc8, 0x86, 0xb4, 0x6c, - 0xa9, 0x17, 0x74, 0x00, 0xb9, 0x78, 0xa0, 0x67, 0xfe, 0xdf, 0x40, 0x5f, 0x13, 0x01, 0x15, 0xeb, - 0x86, 0x26, 0x58, 0xd9, 0x9e, 0x9e, 0xeb, 0x6f, 0xa1, 0xa6, 0xd0, 0xfb, 0xb0, 0x3e, 0x69, 0x88, - 0xd1, 0x9c, 0x53, 0xcb, 0x41, 0x39, 0x66, 0xe8, 0x41, 0x37, 0xdd, 0x3d, 0xd5, 0xae, 0x98, 0x95, - 0x76, 0x4d, 0xba, 0x67, 0x5b, 0x2e, 0x8d, 0x77, 0x21, 0xcf, 0xdc, 0x81, 0x8f, 0xf9, 0x38, 0x24, - 0x7a, 0x49, 0x98, 0x10, 0xcc, 0x3f, 0x1b, 0xb0, 0xa2, 0x96, 0x8e, 0x84, 0xdf, 0x8c, 0xf9, 0x7e, - 0x4b, 0x2d, 0xf2, 0x5b, 0xfa, 0xcd, 0xfd, 0xd6, 0x04, 0x88, 0x95, 0x11, 0xa3, 0x6e, 0x41, 0xff, - 0x53, 0x2a, 0x76, 0xdd, 0x81, 0xae, 0xa9, 0x84, 0x90, 0xf9, 0x0f, 0x03, 0xf2, 0x31, 0x1f, 0x35, - 0xa1, 0x14, 0xe9, 0x65, 0x3f, 0xf6, 0xf0, 0x40, 0xe7, 0xce, 0xbd, 0x85, 0xca, 0x7d, 0xe2, 0xe1, - 0x81, 0x55, 0xd0, 0xfa, 0x88, 0x97, 0xf9, 0x71, 0x48, 0x2d, 0x88, 0xc3, 0x54, 0xe0, 0xd3, 0x6f, - 0x16, 0xf8, 0xa9, 0x10, 0x65, 0x6e, 0x86, 0xe8, 0x4f, 0x29, 0xc8, 0x75, 0xe4, 0x9a, 0x83, 0xbd, - 0xaf, 0xa2, 0x22, 0xee, 0x40, 0x3e, 0xa0, 0x9e, 0xad, 0x38, 0x19, 0xc9, 0xc9, 0x05, 0xd4, 0xb3, - 0x66, 0xc2, 0xbe, 0xfc, 0x96, 0xca, 0x65, 0xe5, 0x2d, 0x78, 0x2d, 0x7b, 0xd3, 0x6b, 0x21, 0x14, - 0x95, 0x2b, 0x74, 0x4f, 0xba, 0x2f, 0x7c, 0x20, 0xbf, 0x63, 0x8c, 0xd9, 0xcf, 0x24, 0xa5, 0xb6, - 0x42, 0x5a, 0x1a, 0x27, 0x24, 0xd4, 0x96, 0xae, 0x87, 0x4b, 0x65, 0x51, 0x5a, 0x5a, 0x1a, 0x67, - 0xfe, 0xce, 0x00, 0x98, 0x2c, 0x07, 0xe2, 0x83, 
0x81, 0x49, 0x15, 0xec, 0xa9, 0x9b, 0x6b, 0x8b, - 0x82, 0xa6, 0xef, 0x2f, 0xb2, 0xa4, 0xde, 0x7b, 0x50, 0x9a, 0x24, 0x23, 0x23, 0x91, 0x32, 0xb5, - 0x57, 0xec, 0x08, 0x5d, 0xc2, 0xad, 0xe2, 0x79, 0xe2, 0xcd, 0xfc, 0x8b, 0x01, 0x79, 0xa9, 0xd3, - 0x31, 0xe1, 0x78, 0x2a, 0x86, 0xc6, 0x9b, 0xc7, 0xf0, 0x1e, 0x80, 0x3a, 0x86, 0xb9, 0xcf, 0x88, - 0xce, 0xac, 0xbc, 0xa4, 0x74, 0xdd, 0x67, 0x04, 0x7d, 0x3f, 0x76, 0x78, 0xfa, 0xd5, 0x0e, 0xd7, - 0x25, 0x1d, 0xb9, 0xfd, 0x36, 0x64, 0xfd, 0xf1, 0xc8, 0x16, 0xcb, 0xbb, 0x5a, 0x60, 0x56, 0xfc, - 0xf1, 0xe8, 0xec, 0x82, 0x99, 0xbf, 0x86, 0xec, 0xd9, 0x85, 0xfc, 0x92, 0x55, 0x03, 0x86, 0xea, - 0xcf, 0x27, 0x35, 0xae, 0x72, 0x82, 0x20, 0xbf, 0x16, 0xe6, 0xcc, 0x2a, 0xd4, 0x78, 0xcd, 0x6f, - 0x64, 0xfd, 0x75, 0xfc, 0xde, 0xdf, 0x0c, 0x28, 0x24, 0xfa, 0x03, 0xfa, 0x2e, 0xdc, 0x6a, 0x1d, - 0x9d, 0xee, 0x7d, 0x6a, 0xb7, 0xf7, 0xed, 0x4f, 0x8e, 0x9a, 0x0f, 0xed, 0xcf, 0x4e, 0x3e, 0x3d, - 0x39, 0xfd, 0xf9, 0x49, 0x79, 0xa9, 0xba, 0x79, 0x75, 0x5d, 0x47, 0x09, 0xec, 0x67, 0xfe, 0x13, - 0x9f, 0x3e, 0xf5, 0xd1, 0x2e, 0x6c, 0x4c, 0x8b, 0x34, 0x5b, 0xdd, 0x83, 0x93, 0xb3, 0xb2, 0x51, - 0xbd, 0x75, 0x75, 0x5d, 0x5f, 0x4f, 0x48, 0x34, 0x7b, 0x8c, 0xf8, 0x7c, 0x56, 0x60, 0xef, 0xf4, - 0xf8, 0xb8, 0x7d, 0x56, 0x4e, 0xcd, 0x08, 0xe8, 0x86, 0xfd, 0x2e, 0xac, 0x4f, 0x0b, 0x9c, 0xb4, - 0x8f, 0xca, 0xe9, 0x2a, 0xba, 0xba, 0xae, 0xaf, 0x26, 0xd0, 0x27, 0xae, 0x57, 0xcd, 0xfd, 0xf6, - 0xf7, 0xb5, 0xa5, 0x3f, 0xfe, 0xa1, 0x66, 0x08, 0xcb, 0x4a, 0x53, 0x3d, 0x02, 0x7d, 0x00, 0xb7, - 0xbb, 0xed, 0x87, 0x27, 0x07, 0xfb, 0xf6, 0x71, 0xf7, 0xa1, 0x7d, 0xf6, 0xcb, 0xce, 0x41, 0xc2, - 0xba, 0xb5, 0xab, 0xeb, 0x7a, 0x41, 0x9b, 0xb4, 0x08, 0xdd, 0xb1, 0x0e, 0x1e, 0x9d, 0x9e, 0x1d, - 0x94, 0x0d, 0x85, 0xee, 0x84, 0x44, 0xec, 0x80, 0x12, 0x7d, 0x1f, 0xb6, 0xe6, 0xa0, 0x63, 0xc3, - 0xd6, 0xaf, 0xae, 0xeb, 0xa5, 0x4e, 0x48, 0x54, 0xfd, 0x48, 0x89, 0x06, 0x54, 0x66, 0x25, 0x4e, - 0x3b, 0xa7, 0xdd, 0xe6, 0x51, 0xb9, 0x5e, 0x2d, 0x5f, 0x5d, 0xd7, 0x8b, 0x51, 0x33, 
0x14, 0xf8, - 0x89, 0x65, 0xad, 0x9f, 0x7d, 0xfe, 0xa2, 0x66, 0x7c, 0xf1, 0xa2, 0x66, 0xfc, 0xeb, 0x45, 0xcd, - 0x78, 0xfe, 0xb2, 0xb6, 0xf4, 0xc5, 0xcb, 0xda, 0xd2, 0xdf, 0x5f, 0xd6, 0x96, 0x7e, 0xf5, 0x83, - 0x81, 0xcb, 0x87, 0xe3, 0x5e, 0xa3, 0x4f, 0x47, 0xbb, 0xc9, 0x7f, 0x6f, 0x26, 0x8f, 0xea, 0x5f, - 0xa4, 0x9b, 0xff, 0xec, 0xf4, 0x56, 0x24, 0xfd, 0xc3, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x76, - 0x17, 0xbb, 0x3e, 0x9a, 0x12, 0x00, 0x00, + // 1895 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, + 0x11, 0xd6, 0x90, 0x94, 0x48, 0x16, 0x49, 0x89, 0xea, 0x95, 0x65, 0x8a, 0x96, 0x29, 0x66, 0x92, + 0x8d, 0xb5, 0x8f, 0x50, 0x8e, 0x37, 0xc8, 0x03, 0x48, 0x9c, 0x25, 0x25, 0xd9, 0x62, 0x56, 0x0f, + 0x66, 0xc8, 0x75, 0x1e, 0x97, 0x41, 0x93, 0xd3, 0x22, 0x27, 0x1e, 0x4e, 0x0f, 0x66, 0x9a, 0xb2, + 0xe4, 0x63, 0x4e, 0x81, 0x4e, 0x3e, 0xe5, 0xa6, 0x53, 0x72, 0x08, 0x90, 0x4b, 0x80, 0xfc, 0x81, + 0x20, 0xa7, 0xbd, 0x04, 0xd8, 0x5b, 0x72, 0xda, 0x04, 0x76, 0x90, 0xdf, 0x11, 0xf4, 0x63, 0x86, + 0x43, 0x91, 0xb4, 0x0d, 0x43, 0xd8, 0x0b, 0xc1, 0xa9, 0xfa, 0xaa, 0xbb, 0xba, 0xea, 0xab, 0xea, + 0x9a, 0x81, 0x4d, 0x46, 0x5c, 0x8b, 0xf8, 0x43, 0xdb, 0x65, 0x3b, 0xec, 0xc2, 0x23, 0x81, 0xfc, + 0xad, 0x79, 0x3e, 0x65, 0x14, 0x15, 0xc7, 0xda, 0x9a, 0x90, 0x97, 0xd7, 0xfa, 0xb4, 0x4f, 0x85, + 0x72, 0x87, 0xff, 0x93, 0xb8, 0xf2, 0x56, 0x9f, 0xd2, 0xbe, 0x43, 0x76, 0xc4, 0x53, 0x77, 0x74, + 0xba, 0xc3, 0xec, 0x21, 0x09, 0x18, 0x1e, 0x7a, 0x0a, 0x70, 0x37, 0xb6, 0x4d, 0xcf, 0xbf, 0xf0, + 0x18, 0xe5, 0x58, 0x7a, 0xaa, 0xd4, 0x95, 0x98, 0xfa, 0x8c, 0xf8, 0x81, 0x4d, 0xdd, 0xb8, 0x1f, + 0xe5, 0xea, 0x94, 0x97, 0x67, 0xd8, 0xb1, 0x2d, 0xcc, 0xa8, 0x2f, 0x11, 0xfa, 0x8f, 0xa0, 0xd0, + 0xc2, 0x3e, 0x6b, 0x13, 0x76, 0x40, 0xb0, 0x45, 0x7c, 0xb4, 0x06, 0x8b, 0x8c, 0x32, 0xec, 0x94, + 0xb4, 0xaa, 0xb6, 0x5d, 0x30, 0xe4, 0x03, 0x42, 0x90, 0x1a, 0xe0, 0x60, 0x50, 0x4a, 0x54, 0xb5, + 0xed, 0xbc, 0x21, 0xfe, 0xeb, 
0x03, 0x48, 0x71, 0x53, 0x6e, 0x61, 0xbb, 0x16, 0x39, 0x0f, 0x2d, + 0xc4, 0x03, 0x97, 0x76, 0x2f, 0x18, 0x09, 0x94, 0x89, 0x7c, 0x40, 0xdf, 0x83, 0x45, 0xe1, 0x7f, + 0x29, 0x59, 0xd5, 0xb6, 0x73, 0x0f, 0x4a, 0xb5, 0x58, 0xa0, 0xe4, 0xf9, 0x6a, 0x2d, 0xae, 0x6f, + 0xa4, 0xbe, 0xf8, 0x6a, 0x6b, 0xc1, 0x90, 0x60, 0xdd, 0x81, 0x74, 0xc3, 0xa1, 0xbd, 0xa7, 0xcd, + 0xbd, 0xc8, 0x11, 0x6d, 0xec, 0x08, 0x3a, 0x82, 0x15, 0x0f, 0xfb, 0xcc, 0x0c, 0x08, 0x33, 0x07, + 0xe2, 0x14, 0x62, 0xd3, 0xdc, 0x83, 0xad, 0xda, 0xf5, 0x3c, 0xd4, 0x26, 0x0e, 0xab, 0x76, 0x29, + 0x78, 0x71, 0xa1, 0xfe, 0xbf, 0x14, 0x2c, 0xa9, 0x60, 0xfc, 0x04, 0xd2, 0x2a, 0xac, 0x62, 0xc3, + 0xdc, 0x83, 0xbb, 0xf1, 0x15, 0x95, 0xaa, 0xb6, 0x4b, 0xdd, 0x80, 0xb8, 0xc1, 0x28, 0x50, 0xeb, + 0x85, 0x36, 0xe8, 0xdb, 0x90, 0xe9, 0x0d, 0xb0, 0xed, 0x9a, 0xb6, 0x25, 0x3c, 0xca, 0x36, 0x72, + 0x2f, 0xbf, 0xda, 0x4a, 0xef, 0x72, 0x59, 0x73, 0xcf, 0x48, 0x0b, 0x65, 0xd3, 0x42, 0xeb, 0xb0, + 0x34, 0x20, 0x76, 0x7f, 0xc0, 0x44, 0x58, 0x92, 0x86, 0x7a, 0x42, 0x3f, 0x84, 0x14, 0x27, 0x44, + 0x29, 0x25, 0xf6, 0x2e, 0xd7, 0x24, 0x5b, 0x6a, 0x21, 0x5b, 0x6a, 0x9d, 0x90, 0x2d, 0x8d, 0x0c, + 0xdf, 0xf8, 0xc5, 0xbf, 0xb7, 0x34, 0x43, 0x58, 0xa0, 0x5d, 0x28, 0x38, 0x38, 0x60, 0x66, 0x97, + 0x87, 0x8d, 0x6f, 0xbf, 0x28, 0x96, 0xd8, 0x98, 0x0e, 0x88, 0x0a, 0xac, 0x72, 0x3d, 0xc7, 0xad, + 0xa4, 0xc8, 0x42, 0xdb, 0x50, 0x14, 0x8b, 0xf4, 0xe8, 0x70, 0x68, 0x33, 0x53, 0xc4, 0x7d, 0x49, + 0xc4, 0x7d, 0x99, 0xcb, 0x77, 0x85, 0xf8, 0x80, 0x67, 0xe0, 0x0e, 0x64, 0x2d, 0xcc, 0xb0, 0x84, + 0xa4, 0x05, 0x24, 0xc3, 0x05, 0x42, 0x79, 0x0f, 0x56, 0x22, 0xd6, 0x05, 0x12, 0x92, 0x91, 0xab, + 0x8c, 0xc5, 0x02, 0x78, 0x1f, 0xd6, 0x5c, 0x72, 0xce, 0xcc, 0xeb, 0xe8, 0xac, 0x40, 0x23, 0xae, + 0x7b, 0x32, 0x69, 0xf1, 0x3e, 0x2c, 0xf7, 0xc2, 0xe0, 0x4b, 0x2c, 0x08, 0x6c, 0x21, 0x92, 0x0a, + 0xd8, 0x06, 0x64, 0xb0, 0xe7, 0x49, 0x40, 0x4e, 0x00, 0xd2, 0xd8, 0xf3, 0x84, 0xea, 0x43, 0x58, + 0x15, 0x67, 0xf4, 0x49, 0x30, 0x72, 0x98, 0x5a, 0x24, 0x2f, 0x30, 
0x2b, 0x5c, 0x61, 0x48, 0xb9, + 0xc0, 0x7e, 0x13, 0x0a, 0xe4, 0xcc, 0xb6, 0x88, 0xdb, 0x23, 0x12, 0x57, 0x10, 0xb8, 0x7c, 0x28, + 0x14, 0xa0, 0x0f, 0xa0, 0xe8, 0xf9, 0xd4, 0xa3, 0x01, 0xf1, 0x4d, 0x6c, 0x59, 0x3e, 0x09, 0x82, + 0xd2, 0xb2, 0x5c, 0x2f, 0x94, 0xd7, 0xa5, 0x58, 0xff, 0x6d, 0x02, 0x52, 0x7b, 0x98, 0x61, 0x54, + 0x84, 0x24, 0x3b, 0x0f, 0x4a, 0x5a, 0x35, 0xb9, 0x9d, 0x37, 0xf8, 0x5f, 0x34, 0x80, 0x92, 0xed, + 0x32, 0xe2, 0x0f, 0x89, 0x65, 0x63, 0x46, 0xcc, 0x80, 0xf1, 0x5f, 0x9f, 0x52, 0x16, 0x28, 0x6e, + 0x6f, 0x4f, 0xa7, 0xb2, 0x19, 0xb3, 0x68, 0x73, 0x03, 0x83, 0xe3, 0x55, 0x66, 0xd7, 0xed, 0x99, + 0x5a, 0xf4, 0x29, 0x64, 0x42, 0xff, 0x55, 0x51, 0x56, 0xa6, 0x57, 0xde, 0x57, 0x88, 0x43, 0x3b, + 0x60, 0x6a, 0xbd, 0xc8, 0x0a, 0xfd, 0x18, 0x32, 0x43, 0x12, 0x04, 0xb8, 0x4f, 0x82, 0x88, 0xa9, + 0x53, 0x2b, 0x1c, 0x29, 0x44, 0x68, 0x1d, 0x5a, 0xe8, 0x2f, 0x12, 0x70, 0x6b, 0x6f, 0xe4, 0x39, + 0x76, 0x0f, 0x33, 0xf2, 0x84, 0x32, 0x12, 0xee, 0x85, 0xbe, 0x03, 0x4b, 0x67, 0x94, 0x11, 0x13, + 0xab, 0xda, 0x5b, 0x9f, 0x5e, 0x95, 0xe3, 0x8d, 0x45, 0x8e, 0xaa, 0x47, 0xf0, 0xae, 0x0a, 0xd0, + 0x6b, 0xe1, 0x0d, 0xf4, 0x31, 0x20, 0xd1, 0xda, 0xcc, 0x33, 0xca, 0x6c, 0xb7, 0x6f, 0x7a, 0xf4, + 0x19, 0xf1, 0x55, 0xfd, 0x15, 0x85, 0xe6, 0x89, 0x50, 0xb4, 0xb8, 0x7c, 0x82, 0xc3, 0x0a, 0x9a, + 0x12, 0xd0, 0x31, 0x87, 0x25, 0xb0, 0x01, 0xd9, 0xa8, 0x87, 0xab, 0xa2, 0x7b, 0xbb, 0xba, 0x1d, + 0x9b, 0xe9, 0xff, 0x48, 0xc0, 0xc6, 0x21, 0x6f, 0x00, 0xbb, 0x8e, 0x4d, 0x5c, 0x56, 0x67, 0x0c, + 0xf7, 0x9e, 0x46, 0x61, 0x69, 0xc2, 0x6a, 0x8f, 0xba, 0xa7, 0x8e, 0xdd, 0x13, 0x7e, 0x8b, 0x0a, + 0x57, 0x11, 0xda, 0x9c, 0x3e, 0xb2, 0x58, 0x47, 0x14, 0xb4, 0x51, 0x8c, 0x99, 0x09, 0x09, 0x27, + 0x34, 0xaf, 0x6d, 0xea, 0x9a, 0xaa, 0xfd, 0x24, 0xc4, 0x99, 0xf2, 0x52, 0x78, 0x20, 0x9b, 0xd0, + 0x31, 0xac, 0x75, 0x2f, 0x9e, 0x63, 0x97, 0xd9, 0x2e, 0x89, 0x95, 0x66, 0x29, 0x59, 0x4d, 0x6e, + 0xe7, 0x1e, 0xdc, 0x99, 0x11, 0xe5, 0x10, 0x63, 0xbc, 0x17, 0x19, 0x8e, 0xeb, 0x76, 0x4e, 0xe0, + 0x53, 
0x73, 0x02, 0x7f, 0x13, 0xf1, 0xfc, 0xaf, 0x06, 0x99, 0x28, 0x7c, 0x18, 0x6e, 0x5b, 0x21, + 0xdd, 0x4c, 0x41, 0x98, 0x88, 0xfe, 0x32, 0x88, 0xf7, 0xa6, 0x4f, 0x34, 0x93, 0x9f, 0x07, 0x0b, + 0xc6, 0x2d, 0x6b, 0x26, 0x71, 0x5d, 0xd8, 0x74, 0x78, 0xe8, 0xcc, 0x9e, 0xc8, 0x9f, 0x89, 0x45, + 0x02, 0xc7, 0xfb, 0x48, 0x7e, 0x7e, 0x34, 0x27, 0x59, 0xb3, 0x92, 0x7e, 0xb0, 0x60, 0x6c, 0x38, + 0xf3, 0x94, 0x8d, 0x45, 0x48, 0x06, 0xa3, 0xa1, 0x7e, 0x08, 0xf9, 0x78, 0x9d, 0xf2, 0xba, 0x8c, + 0x1d, 0x2d, 0x39, 0xbb, 0x2e, 0xa3, 0x45, 0xae, 0x55, 0xb5, 0xfe, 0x10, 0xd6, 0x67, 0xf7, 0x13, + 0xf4, 0x2d, 0x58, 0xf6, 0xf1, 0x33, 0xd9, 0x8c, 0x4c, 0xc7, 0x0e, 0x98, 0x6a, 0x5c, 0x79, 0x1f, + 0x3f, 0x13, 0x08, 0xbe, 0xbb, 0xfe, 0x33, 0xc8, 0x84, 0x35, 0x8f, 0x1e, 0x42, 0x21, 0xac, 0xf7, + 0xb1, 0xc1, 0xcc, 0xdb, 0x48, 0x99, 0x18, 0xf9, 0x10, 0x2f, 0xd6, 0xfa, 0x14, 0xd2, 0x4a, 0x81, + 0xbe, 0x01, 0x79, 0x17, 0x0f, 0x49, 0xe0, 0xe1, 0x1e, 0xe1, 0xf7, 0x9a, 0x9c, 0x03, 0x72, 0x91, + 0xac, 0x69, 0xf1, 0x11, 0x81, 0xdf, 0x3d, 0xe1, 0xac, 0xc2, 0xff, 0xeb, 0xbf, 0x84, 0x75, 0xde, + 0x69, 0xeb, 0x67, 0xd8, 0x76, 0x70, 0xd7, 0x76, 0x6c, 0x76, 0xa1, 0xae, 0xf8, 0x3b, 0x90, 0xf5, + 0xa9, 0x3a, 0x8d, 0x3a, 0x48, 0xc6, 0xa7, 0xf2, 0x20, 0x7c, 0xb7, 0x1e, 0x75, 0x46, 0x43, 0x37, + 0x6a, 0xbd, 0x5c, 0x9f, 0x93, 0x32, 0x01, 0xd1, 0xff, 0x92, 0x84, 0x14, 0xcf, 0x3e, 0xfa, 0x04, + 0x52, 0xfc, 0x0c, 0xc2, 0xa3, 0xe5, 0x59, 0xa3, 0x47, 0xdb, 0xee, 0xbb, 0xc4, 0x3a, 0x0a, 0xfa, + 0x9d, 0x0b, 0x8f, 0x18, 0x02, 0x1c, 0xbb, 0xf9, 0x13, 0x13, 0x37, 0xff, 0x1a, 0x2c, 0xfa, 0x74, + 0xe4, 0x5a, 0xa2, 0x21, 0x2d, 0x1a, 0xf2, 0x01, 0xed, 0x43, 0x26, 0xba, 0xd0, 0x53, 0x6f, 0xba, + 0xd0, 0x57, 0x78, 0x42, 0xf9, 0xb8, 0xa1, 0x04, 0x46, 0xba, 0xab, 0xee, 0xf5, 0x1b, 0xa8, 0x29, + 0xf4, 0x11, 0xac, 0x8e, 0x1b, 0x62, 0x78, 0xcf, 0xc9, 0xe1, 0xa0, 0x18, 0x29, 0xd4, 0x45, 0x37, + 0xd9, 0x3d, 0xe5, 0xac, 0x98, 0x16, 0xe7, 0x1a, 0x77, 0xcf, 0xa6, 0x18, 0x1a, 0x37, 0x21, 0x1b, + 0xd8, 0x7d, 0x17, 0xb3, 0x91, 0x4f, 0xd4, 
0x90, 0x30, 0x16, 0xa0, 0x47, 0xb0, 0x2c, 0x0b, 0xf6, + 0x9c, 0x11, 0x57, 0x0c, 0x65, 0xd9, 0x79, 0x63, 0x9e, 0xa8, 0xc7, 0x10, 0x66, 0x14, 0xce, 0xe2, + 0x8f, 0xfa, 0x05, 0x14, 0x26, 0xf4, 0xe8, 0x1e, 0x14, 0xf9, 0x7c, 0x20, 0x46, 0x18, 0x46, 0x4d, + 0xbe, 0xa3, 0x22, 0x56, 0x01, 0x7b, 0x1e, 0x27, 0x4e, 0x87, 0xf2, 0xec, 0xa1, 0x9f, 0xc2, 0x66, + 0x04, 0x0c, 0x88, 0x73, 0x6a, 0xe2, 0x11, 0x1b, 0x10, 0x97, 0xf1, 0x1e, 0x60, 0xbb, 0x7d, 0x45, + 0xb9, 0x0d, 0x65, 0xd4, 0x26, 0xce, 0x69, 0x7d, 0x02, 0xa0, 0x3f, 0x84, 0xf7, 0x26, 0xb6, 0x56, + 0xeb, 0xbe, 0xad, 0x03, 0xfa, 0xdf, 0x34, 0x58, 0x92, 0x73, 0x57, 0x8c, 0x3a, 0xda, 0x6c, 0xea, + 0x24, 0xe6, 0x51, 0x27, 0xf9, 0xee, 0xd4, 0xa9, 0x03, 0x44, 0xf9, 0xe0, 0xb7, 0xfd, 0x9c, 0x2b, + 0x40, 0xba, 0xd8, 0xb6, 0xfb, 0xaa, 0xad, 0xc4, 0x8c, 0xf4, 0x3f, 0x27, 0x20, 0x1b, 0xe9, 0x51, + 0x1d, 0x0a, 0xa1, 0x5f, 0xe6, 0xa9, 0x83, 0xfb, 0xaa, 0x7c, 0xee, 0xce, 0x75, 0xee, 0x91, 0x83, + 0xfb, 0x46, 0x4e, 0xf9, 0xc3, 0x1f, 0x66, 0x53, 0x31, 0x31, 0x87, 0x8a, 0x13, 0xdc, 0x4f, 0xbe, + 0x1b, 0xf7, 0x27, 0x58, 0x9a, 0xba, 0xce, 0xd2, 0xc3, 0x29, 0x96, 0xca, 0x12, 0x7b, 0xff, 0x0d, + 0x2c, 0x95, 0x19, 0xbe, 0xce, 0xd5, 0xbf, 0x26, 0x20, 0xd3, 0x12, 0x73, 0x23, 0x76, 0xbe, 0x8e, + 0x16, 0x73, 0x07, 0xb2, 0x1e, 0x75, 0x4c, 0xa9, 0x49, 0x09, 0x4d, 0xc6, 0xa3, 0x8e, 0x31, 0x45, + 0xa2, 0xc5, 0x1b, 0xea, 0x3f, 0x4b, 0x37, 0x90, 0x83, 0xf4, 0xb5, 0x1c, 0xe8, 0x3e, 0xe4, 0x65, + 0x28, 0x54, 0x93, 0xbf, 0xcf, 0x63, 0x20, 0x5e, 0x0c, 0xb5, 0xe9, 0xf7, 0x4e, 0xe9, 0xb6, 0x44, + 0x1a, 0x0a, 0xc7, 0x2d, 0xe4, 0x6b, 0x8f, 0xba, 0xad, 0x4b, 0xf3, 0x48, 0x6e, 0x28, 0x9c, 0xfe, + 0x7b, 0x0d, 0x60, 0x3c, 0x6d, 0xf1, 0x37, 0xb0, 0x40, 0xb8, 0x60, 0x4e, 0xec, 0x5c, 0x99, 0x97, + 0x34, 0xb5, 0x7f, 0x3e, 0x88, 0xfb, 0xbd, 0x0b, 0x85, 0x31, 0xb5, 0x03, 0x12, 0x3a, 0x53, 0x79, + 0xcd, 0xd0, 0xd5, 0x26, 0xcc, 0xc8, 0x9f, 0xc5, 0x9e, 0xf4, 0xbf, 0x6b, 0x90, 0x15, 0x3e, 0x1d, + 0x11, 0x86, 0x27, 0x72, 0xa8, 0xbd, 0x7b, 0x0e, 0xef, 0x02, 0xc8, 0x65, 0x02, 
0xfb, 0x39, 0x51, + 0xcc, 0xca, 0x0a, 0x49, 0xdb, 0x7e, 0x4e, 0xd0, 0xf7, 0xa3, 0x80, 0x27, 0x5f, 0x1f, 0x70, 0xd5, + 0x20, 0xc2, 0xb0, 0xdf, 0x86, 0xb4, 0x3b, 0x1a, 0x9a, 0xfc, 0x6d, 0x48, 0x4e, 0x84, 0x4b, 0xee, + 0x68, 0xd8, 0x39, 0x0f, 0xf4, 0xdf, 0x40, 0xba, 0x73, 0x2e, 0x3e, 0x0d, 0xc8, 0x1b, 0x9b, 0xaa, + 0xf7, 0x51, 0xd9, 0x25, 0x33, 0x5c, 0x20, 0x5e, 0xbf, 0x66, 0x5c, 0xfe, 0xa8, 0xf6, 0x96, 0x1f, + 0x1d, 0xd4, 0xe7, 0x86, 0x0f, 0xff, 0xa9, 0x41, 0x2e, 0xd6, 0x6d, 0xd0, 0x77, 0xe1, 0x56, 0xe3, + 0xf0, 0x64, 0xf7, 0x33, 0xb3, 0xb9, 0x67, 0x3e, 0x3a, 0xac, 0x3f, 0x36, 0x3f, 0x3f, 0xfe, 0xec, + 0xf8, 0xe4, 0x17, 0xc7, 0xc5, 0x85, 0xf2, 0xfa, 0xe5, 0x55, 0x15, 0xc5, 0xb0, 0x9f, 0xbb, 0x4f, + 0x5d, 0xfa, 0xcc, 0x45, 0x3b, 0xb0, 0x36, 0x69, 0x52, 0x6f, 0xb4, 0xf7, 0x8f, 0x3b, 0x45, 0xad, + 0x7c, 0xeb, 0xf2, 0xaa, 0xba, 0x1a, 0xb3, 0xa8, 0x77, 0x03, 0xe2, 0xb2, 0x69, 0x83, 0xdd, 0x93, + 0xa3, 0xa3, 0x66, 0xa7, 0x98, 0x98, 0x32, 0x50, 0xed, 0xff, 0x03, 0x58, 0x9d, 0x34, 0x38, 0x6e, + 0x1e, 0x16, 0x93, 0x65, 0x74, 0x79, 0x55, 0x5d, 0x8e, 0xa1, 0x8f, 0x6d, 0xa7, 0x9c, 0xf9, 0xdd, + 0x1f, 0x2a, 0x0b, 0x7f, 0xfa, 0x63, 0x45, 0xe3, 0x27, 0x2b, 0x4c, 0xf4, 0x08, 0xf4, 0x31, 0xdc, + 0x6e, 0x37, 0x1f, 0x1f, 0xef, 0xef, 0x99, 0x47, 0xed, 0xc7, 0x66, 0xe7, 0x57, 0xad, 0xfd, 0xd8, + 0xe9, 0x56, 0x2e, 0xaf, 0xaa, 0x39, 0x75, 0xa4, 0x79, 0xe8, 0x96, 0xb1, 0xff, 0xe4, 0xa4, 0xb3, + 0x5f, 0xd4, 0x24, 0xba, 0xe5, 0x13, 0xde, 0xc0, 0x04, 0xfa, 0x3e, 0x6c, 0xcc, 0x40, 0x47, 0x07, + 0x5b, 0xbd, 0xbc, 0xaa, 0x16, 0x5a, 0x3e, 0x91, 0xf5, 0x23, 0x2c, 0x6a, 0x50, 0x9a, 0xb6, 0x38, + 0x69, 0x9d, 0xb4, 0xeb, 0x87, 0xc5, 0x6a, 0xb9, 0x78, 0x79, 0x55, 0xcd, 0x87, 0xcd, 0x90, 0xe3, + 0xc7, 0x27, 0x6b, 0xfc, 0xfc, 0x8b, 0x97, 0x15, 0xed, 0xcb, 0x97, 0x15, 0xed, 0x3f, 0x2f, 0x2b, + 0xda, 0x8b, 0x57, 0x95, 0x85, 0x2f, 0x5f, 0x55, 0x16, 0xfe, 0xf5, 0xaa, 0xb2, 0xf0, 0xeb, 0x1f, + 0xf4, 0x6d, 0x36, 0x18, 0x75, 0x6b, 0x3d, 0x3a, 0xdc, 0x89, 0x7f, 0x0e, 0x1b, 0xff, 0x95, 0x9f, + 0xe5, 0xae, 0x7f, 
0x2a, 0xeb, 0x2e, 0x09, 0xf9, 0x27, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xf4, + 0xa6, 0x90, 0x46, 0xeb, 0x13, 0x00, 0x00, } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { @@ -2402,6 +2526,18 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.VoteExtension != nil { + { + size, err := m.VoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) @@ -2421,12 +2557,12 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err16 != nil { - return 0, err16 + n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err17 != nil { + return 0, err17 } - i -= n16 - i = encodeVarintTypes(dAtA, i, uint64(n16)) + i -= n17 + i = encodeVarintTypes(dAtA, i, uint64(n17)) i-- dAtA[i] = 0x2a { @@ -2457,6 +2593,73 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppDataSelfAuthenticating) > 0 { + i -= len(m.AppDataSelfAuthenticating) + copy(dAtA[i:], m.AppDataSelfAuthenticating) + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.AppDataSelfAuthenticating))) + i-- + dAtA[i] = 0x12 + } + if len(m.AppDataToSign) > 0 { + i -= len(m.AppDataToSign) + copy(dAtA[i:], m.AppDataToSign) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppDataToSign))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VoteExtensionToSign) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteExtensionToSign) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteExtensionToSign) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppDataToSign) > 0 { + i -= len(m.AppDataToSign) + copy(dAtA[i:], m.AppDataToSign) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppDataToSign))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Commit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2534,6 +2737,18 @@ func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.VoteExtension != nil { + { + size, err := m.VoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) @@ -2541,12 +2756,12 @@ func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err19 != nil { - return 0, err19 + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err21 != nil { + return 0, err21 } - i -= n19 
- i = encodeVarintTypes(dAtA, i, uint64(n19)) + i -= n21 + i = encodeVarintTypes(dAtA, i, uint64(n21)) i-- dAtA[i] = 0x1a if len(m.ValidatorAddress) > 0 { @@ -2591,12 +2806,12 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err20 != nil { - return 0, err20 + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err22 != nil { + return 0, err22 } - i -= n20 - i = encodeVarintTypes(dAtA, i, uint64(n20)) + i -= n22 + i = encodeVarintTypes(dAtA, i, uint64(n22)) i-- dAtA[i] = 0x32 { @@ -3168,6 +3383,40 @@ func (m *Vote) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.VoteExtension != nil { + l = m.VoteExtension.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *VoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AppDataToSign) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppDataSelfAuthenticating) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *VoteExtensionToSign) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AppDataToSign) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -3213,6 +3462,10 @@ func (m *CommitSig) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.VoteExtension != nil { + l = m.VoteExtension.Size() + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -5593,6 +5846,244 @@ func (m *Vote) Unmarshal(dAtA []byte) error { m.Signature = []byte{} } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteExtension == nil { + m.VoteExtension = &VoteExtension{} + } + if err := m.VoteExtension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppDataToSign", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + 
postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppDataToSign = append(m.AppDataToSign[:0], dAtA[iNdEx:postIndex]...) + if m.AppDataToSign == nil { + m.AppDataToSign = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppDataSelfAuthenticating", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppDataSelfAuthenticating = append(m.AppDataSelfAuthenticating[:0], dAtA[iNdEx:postIndex]...) + if m.AppDataSelfAuthenticating == nil { + m.AppDataSelfAuthenticating = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteExtensionToSign) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteExtensionToSign: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: VoteExtensionToSign: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppDataToSign", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppDataToSign = append(m.AppDataToSign[:0], dAtA[iNdEx:postIndex]...) + if m.AppDataToSign == nil { + m.AppDataToSign = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -5918,6 +6409,42 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { m.Signature = []byte{} } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteExtension == nil { + m.VoteExtension = &VoteExtensionToSign{} + } + if err := m.VoteExtension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 3b91de107c..19bb7b140c 100644 --- a/rpc/client/event_test.go +++ 
b/rpc/client/event_test.go @@ -2,7 +2,6 @@ package client_test import ( "context" - "fmt" "testing" "time" @@ -26,6 +25,7 @@ func MakeTxKV() ([]byte, []byte, []byte) { } func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, c client.Client) { + t.Helper() // make the tx _, _, tx := MakeTxKV() @@ -43,7 +43,7 @@ func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, case "sync": txres, err = c.BroadcastTxSync(ctx, tx) default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) + require.FailNowf(t, "Unknown broadcastMethod %s", broadcastMethod) } if assert.NoError(t, err) { assert.Equal(t, txres.Code, abci.CodeTypeOK) @@ -52,7 +52,7 @@ func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, // and wait for confirmation evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) - require.Nil(t, err) + require.NoError(t, err) // and make sure it has the proper info txe, ok := evt.(types.EventDataTx) diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index ae4e29f529..9187ddc1ac 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -15,15 +15,11 @@ import ( "github.com/tendermint/tendermint/types" ) -// For some reason the empty node used in tests has a time of -// 2018-10-10 08:20:13.695936996 +0000 UTC -// this is because the test genesis time is set here -// so in order to validate evidence we need evidence to be the same time -var defaultTestTime = time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC) - func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, - chainID string) *types.DuplicateVoteEvidence { + chainID string, + timestamp time.Time, +) *types.DuplicateVoteEvidence { t.Helper() var err error @@ -39,7 +35,7 @@ func newEvidence(t *testing.T, val *privval.FilePV, validator := types.NewValidator(val.Key.PubKey, 10) valSet := 
types.NewValidatorSet([]*types.Validator{validator}) - ev, err := types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) + ev, err := types.NewDuplicateVoteEvidence(vote, vote2, timestamp, valSet) require.NoError(t, err) return ev } @@ -48,6 +44,7 @@ func makeEvidences( t *testing.T, val *privval.FilePV, chainID string, + timestamp time.Time, ) (correct *types.DuplicateVoteEvidence, fakes []*types.DuplicateVoteEvidence) { vote := types.Vote{ ValidatorAddress: val.Key.Address, @@ -55,7 +52,7 @@ func makeEvidences( Height: 1, Round: 0, Type: tmproto.PrevoteType, - Timestamp: defaultTestTime, + Timestamp: timestamp, BlockID: types.BlockID{ Hash: tmhash.Sum(tmrand.Bytes(tmhash.Size)), PartSetHeader: types.PartSetHeader{ @@ -67,7 +64,7 @@ func makeEvidences( vote2 := vote vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2")) - correct = newEvidence(t, val, &vote, &vote2, chainID) + correct = newEvidence(t, val, &vote, &vote2, chainID, timestamp) fakes = make([]*types.DuplicateVoteEvidence, 0) @@ -75,34 +72,34 @@ func makeEvidences( { v := vote2 v.ValidatorAddress = []byte("some_address") - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // different height { v := vote2 v.Height = vote.Height + 1 - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // different round { v := vote2 v.Round = vote.Round + 1 - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // different type { v := vote2 v.Type = tmproto.PrecommitType - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, timestamp)) } // exactly same vote { v := vote - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + fakes = append(fakes, 
newEvidence(t, val, &vote, &v, chainID, timestamp)) } return correct, fakes diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 38fb4fcf7b..e26d499f12 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -22,7 +22,8 @@ func TestHTTPSimple(t *testing.T) { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf := rpctest.CreateConfig("ExampleHTTP_simple") + conf, err := rpctest.CreateConfig("ExampleHTTP_simple") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { @@ -42,7 +43,7 @@ func TestHTTPSimple(t *testing.T) { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - bres, err := c.BroadcastTxCommit(context.Background(), tx) + bres, err := c.BroadcastTxCommit(ctx, tx) require.NoError(t, err) if err != nil { log.Fatal(err) @@ -52,7 +53,7 @@ func TestHTTPSimple(t *testing.T) { } // Now try to fetch the value for the key - qres, err := c.ABCIQuery(context.Background(), "/key", k) + qres, err := c.ABCIQuery(ctx, "/key", k) require.NoError(t, err) require.False(t, qres.Response.IsErr(), "ABCIQuery failed") require.True(t, bytes.Equal(qres.Response.Key, k), @@ -71,7 +72,8 @@ func TestHTTPBatching(t *testing.T) { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf := rpctest.CreateConfig("ExampleHTTP_batching") + conf, err := rpctest.CreateConfig("ExampleHTTP_batching") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { @@ -116,7 +118,7 @@ func TestHTTPBatching(t *testing.T) { // Now let's query for the original results as a batch exists := 0 for _, key := range [][]byte{k1, k2} { - _, err := batch.ABCIQuery(context.Background(), "/key", key) + _, err := batch.ABCIQuery(ctx, "/key", key) if err == nil { 
exists++ diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 58e48dbba6..b9ad05aac3 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -4,10 +4,8 @@ import ( "context" "errors" "fmt" - "sync" "time" - "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -34,13 +32,13 @@ func DefaultWaitStrategy(delta int64) (abort error) { // // If waiter is nil, we use DefaultWaitStrategy, but you can also // provide your own implementation -func WaitForHeight(c StatusClient, h int64, waiter Waiter) error { +func WaitForHeight(ctx context.Context, c StatusClient, h int64, waiter Waiter) error { if waiter == nil { waiter = DefaultWaitStrategy } delta := int64(1) for delta > 0 { - s, err := c.Status(context.Background()) + s, err := c.Status(ctx) if err != nil { return err } @@ -59,7 +57,7 @@ func WaitForHeight(c StatusClient, h int64, waiter Waiter) error { // when the timeout duration has expired. // // This handles subscribing and unsubscribing under the hood -func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) (types.TMEventData, error) { +func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) (types.EventData, error) { const subscriber = "helpers" ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -73,7 +71,7 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( // make sure to un-register after the test is over defer func() { if deferErr := c.UnsubscribeAll(ctx, subscriber); deferErr != nil { - panic(err) + panic(deferErr) } }() @@ -84,86 +82,3 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( return nil, errors.New("timed out waiting for event") } } - -var ( - // ErrClientRunning is returned by Start when the client is already running. 
- ErrClientRunning = errors.New("client already running") - - // ErrClientNotRunning is returned by Stop when the client is not running. - ErrClientNotRunning = errors.New("client is not running") -) - -// RunState is a helper that a client implementation can embed to implement -// common plumbing for keeping track of run state and logging. -// -// TODO(creachadair): This type is a temporary measure, and will be removed. -// See the discussion on #6971. -type RunState struct { - Logger log.Logger - - mu sync.Mutex - name string - isRunning bool - quit chan struct{} -} - -// NewRunState returns a new unstarted run state tracker with the given logging -// label and log sink. If logger == nil, a no-op logger is provided by default. -func NewRunState(name string, logger log.Logger) *RunState { - if logger == nil { - logger = log.NewNopLogger() - } - return &RunState{ - name: name, - Logger: logger, - } -} - -// Start sets the state to running, or reports an error. -func (r *RunState) Start() error { - r.mu.Lock() - defer r.mu.Unlock() - if r.isRunning { - r.Logger.Error("not starting client, it is already started", "client", r.name) - return ErrClientRunning - } - r.Logger.Info("starting client", "client", r.name) - r.isRunning = true - r.quit = make(chan struct{}) - return nil -} - -// Stop sets the state to not running, or reports an error. -func (r *RunState) Stop() error { - r.mu.Lock() - defer r.mu.Unlock() - if !r.isRunning { - r.Logger.Error("not stopping client; it is already stopped", "client", r.name) - return ErrClientNotRunning - } - r.Logger.Info("stopping client", "client", r.name) - r.isRunning = false - close(r.quit) - return nil -} - -// SetLogger updates the log sink. -func (r *RunState) SetLogger(logger log.Logger) { - r.mu.Lock() - defer r.mu.Unlock() - r.Logger = logger -} - -// IsRunning reports whether the state is running. 
-func (r *RunState) IsRunning() bool { - r.mu.Lock() - defer r.mu.Unlock() - return r.isRunning -} - -// Quit returns a channel that is closed when a call to Stop succeeds. -func (r *RunState) Quit() <-chan struct{} { - r.mu.Lock() - defer r.mu.Unlock() - return r.quit -} diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 60732b9914..a66becbd58 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -1,6 +1,7 @@ package client_test import ( + "context" "errors" "strings" "testing" @@ -14,7 +15,8 @@ import ( ) func TestWaitForHeight(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // test with error result - immediate failure m := &mock.StatusMock{ @@ -25,11 +27,12 @@ func TestWaitForHeight(t *testing.T) { r := mock.NewStatusRecorder(m) // connection failure always leads to error - err := client.WaitForHeight(r, 8, nil) - require.NotNil(err) - require.Equal("bye", err.Error()) + err := client.WaitForHeight(ctx, r, 8, nil) + require.Error(t, err) + require.Equal(t, "bye", err.Error()) + // we called status once to check - require.Equal(1, len(r.Calls)) + require.Equal(t, 1, len(r.Calls)) // now set current block height to 10 m.Call = mock.Call{ @@ -37,17 +40,19 @@ func TestWaitForHeight(t *testing.T) { } // we will not wait for more than 10 blocks - err = client.WaitForHeight(r, 40, nil) - require.NotNil(err) - require.True(strings.Contains(err.Error(), "aborting")) + err = client.WaitForHeight(ctx, r, 40, nil) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "aborting")) + // we called status once more to check - require.Equal(2, len(r.Calls)) + require.Equal(t, 2, len(r.Calls)) // waiting for the past returns immediately - err = client.WaitForHeight(r, 5, nil) - require.Nil(err) + err = client.WaitForHeight(ctx, r, 5, nil) + require.NoError(t, err) + // we called status once more to check - require.Equal(3, 
len(r.Calls)) + require.Equal(t, 3, len(r.Calls)) // since we can't update in a background goroutine (test --race) // we use the callback to update the status height @@ -58,20 +63,21 @@ func TestWaitForHeight(t *testing.T) { } // we wait for a few blocks - err = client.WaitForHeight(r, 12, myWaiter) - require.Nil(err) + err = client.WaitForHeight(ctx, r, 12, myWaiter) + require.NoError(t, err) + // we called status once to check - require.Equal(5, len(r.Calls)) + require.Equal(t, 5, len(r.Calls)) pre := r.Calls[3] - require.Nil(pre.Error) + require.Nil(t, pre.Error) prer, ok := pre.Response.(*coretypes.ResultStatus) - require.True(ok) - assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) + require.True(t, ok) + assert.Equal(t, int64(10), prer.SyncInfo.LatestBlockHeight) post := r.Calls[4] - require.Nil(post.Error) + require.Nil(t, post.Error) postr, ok := post.Response.(*coretypes.ResultStatus) - require.True(ok) - assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) + require.True(t, ok) + assert.Equal(t, int64(15), postr.SyncInfo.LatestBlockHeight) } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 5bd7b398a9..ebdc18eb2f 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -92,9 +92,11 @@ type baseRPCClient struct { caller jsonrpcclient.Caller } -var _ rpcClient = (*HTTP)(nil) -var _ rpcClient = (*BatchHTTP)(nil) -var _ rpcClient = (*baseRPCClient)(nil) +var ( + _ rpcClient = (*HTTP)(nil) + _ rpcClient = (*BatchHTTP)(nil) + _ rpcClient = (*baseRPCClient)(nil) +) //----------------------------------------------------------------------------- // HTTP @@ -120,19 +122,9 @@ func NewWithTimeout(remote string, t time.Duration) (*HTTP, error) { return NewWithClient(remote, c) } -// NewWithClient allows you to set a custom http client. An error is returned -// on invalid remote. The function returns an error when client is nil -// or an invalid remote. +// NewWithClient constructs an RPC client using a custom HTTP client. 
+// An error is reported if c == nil or remote is an invalid address. func NewWithClient(remote string, c *http.Client) (*HTTP, error) { - if c == nil { - return nil, errors.New("nil client") - } - return NewWithClientAndWSOptions(remote, c, DefaultWSOptions()) -} - -// NewWithClientAndWSOptions allows you to set a custom http client and -// WebSocket options. An error is returned on invalid remote or nil client. -func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*HTTP, error) { if c == nil { return nil, errors.New("nil client") } @@ -141,7 +133,7 @@ func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*H return nil, err } - wsEvents, err := newWsEvents(remote, wso) + wsEvents, err := newWsEvents(remote) if err != nil { return nil, err } @@ -201,21 +193,17 @@ func (b *BatchHTTP) Count() int { func (c *baseRPCClient) Status(ctx context.Context) (*coretypes.ResultStatus, error) { result := new(coretypes.ResultStatus) - _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "status", nil, result); err != nil { return nil, err } - return result, nil } func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { result := new(coretypes.ResultABCIInfo) - _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "abci_info", nil, result); err != nil { return nil, err } - return result, nil } @@ -233,13 +221,14 @@ func (c *baseRPCClient) ABCIQueryWithOptions( data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { result := new(coretypes.ResultABCIQuery) - _, err := c.caller.Call(ctx, "abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, - result) - if err != nil { + if err := c.caller.Call(ctx, "abci_query", abciQueryArgs{ + Path: path, + Data: data, + Height: 
opts.Height, + Prove: opts.Prove, + }, result); err != nil { return nil, err } - return result, nil } @@ -248,8 +237,7 @@ func (c *baseRPCClient) BroadcastTxCommit( tx types.Tx, ) (*coretypes.ResultBroadcastTxCommit, error) { result := new(coretypes.ResultBroadcastTxCommit) - _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) - if err != nil { + if err := c.caller.Call(ctx, "broadcast_tx_commit", txArgs{Tx: tx}, result); err != nil { return nil, err } return result, nil @@ -275,8 +263,7 @@ func (c *baseRPCClient) broadcastTX( tx types.Tx, ) (*coretypes.ResultBroadcastTx, error) { result := new(coretypes.ResultBroadcastTx) - _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) - if err != nil { + if err := c.caller.Call(ctx, route, txArgs{Tx: tx}, result); err != nil { return nil, err } return result, nil @@ -284,15 +271,12 @@ func (c *baseRPCClient) broadcastTX( func (c *baseRPCClient) UnconfirmedTxs( ctx context.Context, - limit *int, + page *int, + perPage *int, ) (*coretypes.ResultUnconfirmedTxs, error) { result := new(coretypes.ResultUnconfirmedTxs) - params := make(map[string]interface{}) - if limit != nil { - params["limit"] = limit - } - _, err := c.caller.Call(ctx, "unconfirmed_txs", params, result) - if err != nil { + + if err := c.caller.Call(ctx, "unconfirmed_txs", unconfirmedArgs{Page: page, PerPage: perPage}, result); err != nil { return nil, err } return result, nil @@ -300,8 +284,7 @@ func (c *baseRPCClient) UnconfirmedTxs( func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { result := new(coretypes.ResultUnconfirmedTxs) - _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "num_unconfirmed_txs", nil, result); err != nil { return nil, err } return result, nil @@ -309,16 +292,14 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) 
(*coretypes.Resul func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { result := new(coretypes.ResultCheckTx) - _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) - if err != nil { + if err := c.caller.Call(ctx, "check_tx", txArgs{Tx: tx}, result); err != nil { return nil, err } return result, nil } func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error { - _, err := c.caller.Call(ctx, "remove_tx", map[string]interface{}{"tx_key": txKey}, nil) - if err != nil { + if err := c.caller.Call(ctx, "remove_tx", txKeyArgs{TxKey: txKey[:]}, nil); err != nil { return err } return nil @@ -326,8 +307,7 @@ func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error { func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { result := new(coretypes.ResultNetInfo) - _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "net_info", nil, result); err != nil { return nil, err } return result, nil @@ -335,8 +315,7 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { result := new(coretypes.ResultDumpConsensusState) - _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "dump_consensus_state", nil, result); err != nil { return nil, err } return result, nil @@ -344,8 +323,7 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.Resu func (c *baseRPCClient) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { result := new(coretypes.ResultConsensusState) - _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, 
"consensus_state", nil, result); err != nil { return nil, err } return result, nil @@ -356,12 +334,7 @@ func (c *baseRPCClient) ConsensusParams( height *int64, ) (*coretypes.ResultConsensusParams, error) { result := new(coretypes.ResultConsensusParams) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "consensus_params", params, result) - if err != nil { + if err := c.caller.Call(ctx, "consensus_params", heightArgs{Height: height}, result); err != nil { return nil, err } return result, nil @@ -369,8 +342,7 @@ func (c *baseRPCClient) ConsensusParams( func (c *baseRPCClient) Health(ctx context.Context) (*coretypes.ResultHealth, error) { result := new(coretypes.ResultHealth) - _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "health", nil, result); err != nil { return nil, err } return result, nil @@ -382,10 +354,10 @@ func (c *baseRPCClient) BlockchainInfo( maxHeight int64, ) (*coretypes.ResultBlockchainInfo, error) { result := new(coretypes.ResultBlockchainInfo) - _, err := c.caller.Call(ctx, "blockchain", - map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, - result) - if err != nil { + if err := c.caller.Call(ctx, "blockchain", blockchainInfoArgs{ + MinHeight: minHeight, + MaxHeight: maxHeight, + }, result); err != nil { return nil, err } return result, nil @@ -393,8 +365,7 @@ func (c *baseRPCClient) BlockchainInfo( func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { result := new(coretypes.ResultGenesis) - _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "genesis", nil, result); err != nil { return nil, err } return result, nil @@ -402,8 +373,7 @@ func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, func (c *baseRPCClient) GenesisChunked(ctx context.Context, id 
uint) (*coretypes.ResultGenesisChunk, error) { result := new(coretypes.ResultGenesisChunk) - _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) - if err != nil { + if err := c.caller.Call(ctx, "genesis_chunked", genesisChunkArgs{Chunk: id}, result); err != nil { return nil, err } return result, nil @@ -411,12 +381,7 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { result := new(coretypes.ResultBlock) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "block", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block", heightArgs{Height: height}, result); err != nil { return nil, err } return result, nil @@ -424,11 +389,7 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.Re func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { result := new(coretypes.ResultBlock) - params := map[string]interface{}{ - "hash": hash, - } - _, err := c.caller.Call(ctx, "block_by_hash", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block_by_hash", hashArgs{Hash: hash}, result); err != nil { return nil, err } return result, nil @@ -439,12 +400,23 @@ func (c *baseRPCClient) BlockResults( height *int64, ) (*coretypes.ResultBlockResults, error) { result := new(coretypes.ResultBlockResults) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height + if err := c.caller.Call(ctx, "block_results", heightArgs{Height: height}, result); err != nil { + return nil, err } - _, err := c.caller.Call(ctx, "block_results", params, result) - if err != nil { + return result, nil +} + +func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { + result := 
new(coretypes.ResultHeader) + if err := c.caller.Call(ctx, "header", heightArgs{Height: height}, result); err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { + result := new(coretypes.ResultHeader) + if err := c.caller.Call(ctx, "header_by_hash", hashArgs{Hash: hash}, result); err != nil { return nil, err } return result, nil @@ -452,12 +424,7 @@ func (c *baseRPCClient) BlockResults( func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { result := new(coretypes.ResultCommit) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "commit", params, result) - if err != nil { + if err := c.caller.Call(ctx, "commit", heightArgs{Height: height}, result); err != nil { return nil, err } return result, nil @@ -465,12 +432,7 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.R func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { result := new(coretypes.ResultTx) - params := map[string]interface{}{ - "hash": hash, - "prove": prove, - } - _, err := c.caller.Call(ctx, "tx", params, result) - if err != nil { + if err := c.caller.Call(ctx, "tx", hashArgs{Hash: hash, Prove: prove}, result); err != nil { return nil, err } return result, nil @@ -484,23 +446,14 @@ func (c *baseRPCClient) TxSearch( perPage *int, orderBy string, ) (*coretypes.ResultTxSearch, error) { - result := new(coretypes.ResultTxSearch) - params := map[string]interface{}{ - "query": query, - "prove": prove, - "order_by": orderBy, - } - - if page != nil { - params["page"] = page - } - if perPage != nil { - params["per_page"] = perPage - } - - _, err := c.caller.Call(ctx, "tx_search", params, result) - if err != nil { + if err := c.caller.Call(ctx, "tx_search", searchArgs{ + Query: query, + 
Prove: prove, + OrderBy: orderBy, + Page: page, + PerPage: perPage, + }, result); err != nil { return nil, err } @@ -513,22 +466,13 @@ func (c *baseRPCClient) BlockSearch( page, perPage *int, orderBy string, ) (*coretypes.ResultBlockSearch, error) { - result := new(coretypes.ResultBlockSearch) - params := map[string]interface{}{ - "query": query, - "order_by": orderBy, - } - - if page != nil { - params["page"] = page - } - if perPage != nil { - params["per_page"] = perPage - } - - _, err := c.caller.Call(ctx, "block_search", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block_search", searchArgs{ + Query: query, + OrderBy: orderBy, + Page: page, + PerPage: perPage, + }, result); err != nil { return nil, err } @@ -542,18 +486,11 @@ func (c *baseRPCClient) Validators( perPage *int, ) (*coretypes.ResultValidators, error) { result := new(coretypes.ResultValidators) - params := make(map[string]interface{}) - if page != nil { - params["page"] = page - } - if perPage != nil { - params["per_page"] = perPage - } - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "validators", params, result) - if err != nil { + if err := c.caller.Call(ctx, "validators", validatorArgs{ + Height: height, + Page: page, + PerPage: perPage, + }, result); err != nil { return nil, err } return result, nil @@ -564,8 +501,9 @@ func (c *baseRPCClient) BroadcastEvidence( ev types.Evidence, ) (*coretypes.ResultBroadcastEvidence, error) { result := new(coretypes.ResultBroadcastEvidence) - _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) - if err != nil { + if err := c.caller.Call(ctx, "broadcast_evidence", evidenceArgs{ + Evidence: coretypes.Evidence{Value: ev}, + }, result); err != nil { return nil, err } return result, nil diff --git a/rpc/client/http/request.go b/rpc/client/http/request.go new file mode 100644 index 0000000000..746cb776d5 --- /dev/null +++ b/rpc/client/http/request.go @@ -0,0 +1,65 
@@ +package http + +// The types in this file define the JSON encoding for RPC method parameters +// from the client to the server. + +import ( + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/rpc/coretypes" +) + +type abciQueryArgs struct { + Path string `json:"path"` + Data bytes.HexBytes `json:"data"` + Height int64 `json:"height,string"` + Prove bool `json:"prove"` +} + +type txArgs struct { + Tx []byte `json:"tx"` +} + +type txKeyArgs struct { + TxKey []byte `json:"tx_key"` +} + +type unconfirmedArgs struct { + Page *int `json:"page,string,omitempty"` + PerPage *int `json:"per_page,string,omitempty"` +} + +type heightArgs struct { + Height *int64 `json:"height,string,omitempty"` +} + +type hashArgs struct { + Hash bytes.HexBytes `json:"hash"` + Prove bool `json:"prove,omitempty"` +} + +type blockchainInfoArgs struct { + MinHeight int64 `json:"minHeight,string"` + MaxHeight int64 `json:"maxHeight,string"` +} + +type genesisChunkArgs struct { + Chunk uint `json:"chunk,string"` +} + +type searchArgs struct { + Query string `json:"query"` + Prove bool `json:"prove,omitempty"` + OrderBy string `json:"order_by,omitempty"` + Page *int `json:"page,string,omitempty"` + PerPage *int `json:"per_page,string,omitempty"` +} + +type validatorArgs struct { + Height *int64 `json:"height,string,omitempty"` + Page *int `json:"page,string,omitempty"` + PerPage *int `json:"per_page,string,omitempty"` +} + +type evidenceArgs struct { + Evidence coretypes.Evidence `json:"evidence"` +} diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index 0f908e271b..43a70b74e4 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -2,53 +2,25 @@ package http import ( "context" - "errors" + "encoding/json" "fmt" "strings" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - tmjson "github.com/tendermint/tendermint/libs/json" - "github.com/tendermint/tendermint/libs/pubsub" + 
"github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) -// WSOptions for the WS part of the HTTP client. -type WSOptions struct { - Path string // path (e.g. "/ws") - - jsonrpcclient.WSOptions // WSClient options -} - -// DefaultWSOptions returns default WS options. -// See jsonrpcclient.DefaultWSOptions. -func DefaultWSOptions() WSOptions { - return WSOptions{ - Path: "/websocket", - WSOptions: jsonrpcclient.DefaultWSOptions(), - } -} - -// Validate performs a basic validation of WSOptions. -func (wso WSOptions) Validate() error { - if len(wso.Path) <= 1 { - return errors.New("empty Path") - } - if wso.Path[0] != '/' { - return errors.New("leading slash is missing in Path") - } - - return nil -} - // wsEvents is a wrapper around WSClient, which implements EventsClient. type wsEvents struct { - *rpcclient.RunState - ws *jsonrpcclient.WSClient + Logger log.Logger + ws *jsonrpcclient.WSClient - mtx tmsync.RWMutex + mtx sync.RWMutex subscriptions map[string]*wsSubscription } @@ -60,25 +32,14 @@ type wsSubscription struct { var _ rpcclient.EventsClient = (*wsEvents)(nil) -func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { - // validate options - if err := wso.Validate(); err != nil { - return nil, fmt.Errorf("invalid WSOptions: %w", err) - } - - // remove the trailing / from the remote else the websocket endpoint - // won't parse correctly - if remote[len(remote)-1] == '/' { - remote = remote[:len(remote)-1] - } - +func newWsEvents(remote string) (*wsEvents, error) { w := &wsEvents{ + Logger: log.NewNopLogger(), subscriptions: make(map[string]*wsSubscription), } - w.RunState = rpcclient.NewRunState("wsEvents", nil) var err error - w.ws, err = jsonrpcclient.NewWSWithOptions(remote, wso.Path, wso.WSOptions) + w.ws, err = 
jsonrpcclient.NewWS(strings.TrimSuffix(remote, "/"), "/websocket") if err != nil { return nil, fmt.Errorf("can't create WS client: %w", err) } @@ -86,23 +47,20 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { // resubscribe immediately w.redoSubscriptionsAfter(0 * time.Second) }) - w.ws.SetLogger(w.Logger) + w.ws.Logger = w.Logger return w, nil } // Start starts the websocket client and the event loop. -func (w *wsEvents) Start() error { - if err := w.ws.Start(); err != nil { +func (w *wsEvents) Start(ctx context.Context) error { + if err := w.ws.Start(ctx); err != nil { return err } - go w.eventListener() + go w.eventListener(ctx) return nil } -// IsRunning reports whether the websocket client is running. -func (w *wsEvents) IsRunning() bool { return w.ws.IsRunning() } - // Stop shuts down the websocket client. func (w *wsEvents) Stop() error { return w.ws.Stop() } @@ -120,11 +78,6 @@ func (w *wsEvents) Stop() error { return w.ws.Stop() } // It returns an error if wsEvents is not running. func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { - - if !w.IsRunning() { - return nil, rpcclient.ErrClientNotRunning - } - if err := w.ws.Subscribe(ctx, query); err != nil { return nil, err } @@ -149,10 +102,6 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, // // It returns an error if wsEvents is not running. func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { - if !w.IsRunning() { - return rpcclient.ErrClientNotRunning - } - if err := w.ws.Unsubscribe(ctx, query); err != nil { return err } @@ -175,10 +124,6 @@ func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // // It returns an error if wsEvents is not running. 
func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { - if !w.IsRunning() { - return rpcclient.ErrClientNotRunning - } - if err := w.ws.UnsubscribeAll(ctx); err != nil { return err } @@ -216,7 +161,7 @@ func isErrAlreadySubscribed(err error) bool { return strings.Contains(err.Error(), pubsub.ErrAlreadySubscribed.Error()) } -func (w *wsEvents) eventListener() { +func (w *wsEvents) eventListener(ctx context.Context) { for { select { case resp, ok := <-w.ws.ResponsesCh: @@ -239,7 +184,7 @@ func (w *wsEvents) eventListener() { } result := new(coretypes.ResultEvent) - err := tmjson.Unmarshal(resp.Result, result) + err := json.Unmarshal(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) continue @@ -258,11 +203,11 @@ func (w *wsEvents) eventListener() { if ok { select { case out.res <- *result: - case <-w.Quit(): + case <-ctx.Done(): return } } - case <-w.Quit(): + case <-ctx.Done(): return } } diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 474eb99372..b53079852f 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -32,16 +32,9 @@ import ( // Client describes the interface of Tendermint RPC client implementations. type Client interface { - // These methods define the operational structure of the client. - - // Start the client. Start must report an error if the client is running. - Start() error - - // Stop the client. Stop must report an error if the client is not running. - Stop() error - - // IsRunning reports whether the client is running. - IsRunning() bool + // Start the client, which will run until the context terminates. + // An error from Start indicates the client could not start. + Start(context.Context) error // These embedded interfaces define the callable methods of the service. 
ABCIClient @@ -78,6 +71,8 @@ type SignClient interface { Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) + Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) + HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) @@ -134,7 +129,7 @@ type EventsClient interface { // // ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe // or UnsubscribeAll. - Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) //nolint:lll + Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) // Unsubscribe unsubscribes given subscriber from query. Unsubscribe(ctx context.Context, subscriber, query string) error // UnsubscribeAll unsubscribes given subscriber from all the queries. @@ -143,7 +138,7 @@ type EventsClient interface { // MempoolClient shows us data about current mempool state. 
type MempoolClient interface { - UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) + UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error) CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error) RemoveTx(context.Context, types.TxKey) error diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 21ca6e6f1a..8b2a88314a 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -6,14 +6,14 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -38,9 +38,8 @@ don't need to do anything). It will keep trying indefinitely with exponential backoff (10ms -> 20ms -> 40ms) until successful. */ type Local struct { - *types.EventBus + *eventbus.EventBus Logger log.Logger - ctx *rpctypes.Context env *rpccore.Environment } @@ -48,36 +47,30 @@ type Local struct { // local RPC client constructor needs to build a local client. type NodeService interface { RPCEnvironment() *rpccore.Environment - EventBus() *types.EventBus + EventBus() *eventbus.EventBus } // New configures a client that calls the Node directly. 
-func New(node NodeService) (*Local, error) { +func New(logger log.Logger, node NodeService) (*Local, error) { env := node.RPCEnvironment() if env == nil { return nil, errors.New("rpc is nil") } return &Local{ EventBus: node.EventBus(), - Logger: log.NewNopLogger(), - ctx: &rpctypes.Context{}, + Logger: logger, env: env, }, nil } var _ rpcclient.Client = (*Local)(nil) -// SetLogger allows to set a logger on the client. -func (c *Local) SetLogger(l log.Logger) { - c.Logger = l -} - func (c *Local) Status(ctx context.Context) (*coretypes.ResultStatus, error) { - return c.env.Status(c.ctx) + return c.env.Status(ctx) } func (c *Local) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { - return c.env.ABCIInfo(c.ctx) + return c.env.ABCIInfo(ctx) } func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { @@ -89,31 +82,31 @@ func (c *Local) ABCIQueryWithOptions( path string, data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) + return c.env.ABCIQuery(ctx, path, data, opts.Height, opts.Prove) } func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - return c.env.BroadcastTxCommit(c.ctx, tx) + return c.env.BroadcastTxCommit(ctx, tx) } func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxAsync(c.ctx, tx) + return c.env.BroadcastTxAsync(ctx, tx) } func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxSync(c.ctx, tx) + return c.env.BroadcastTxSync(ctx, tx) } -func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - return c.env.UnconfirmedTxs(c.ctx, limit) +func (c *Local) UnconfirmedTxs(ctx context.Context, page, perPage *int) 
(*coretypes.ResultUnconfirmedTxs, error) { + return c.env.UnconfirmedTxs(ctx, page, perPage) } func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { - return c.env.NumUnconfirmedTxs(c.ctx) + return c.env.NumUnconfirmedTxs(ctx) } func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - return c.env.CheckTx(c.ctx, tx) + return c.env.CheckTx(ctx, tx) } func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { @@ -121,165 +114,169 @@ func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { } func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { - return c.env.NetInfo(c.ctx) + return c.env.NetInfo(ctx) } func (c *Local) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { - return c.env.DumpConsensusState(c.ctx) + return c.env.DumpConsensusState(ctx) } func (c *Local) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { - return c.env.GetConsensusState(c.ctx) + return c.env.GetConsensusState(ctx) } func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - return c.env.ConsensusParams(c.ctx, height) + return c.env.ConsensusParams(ctx, height) } func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { - return c.env.Health(c.ctx) + return c.env.Health(ctx) } -func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll - return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) +func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { + return c.env.BlockchainInfo(ctx, minHeight, maxHeight) } func (c *Local) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { - return c.env.Genesis(c.ctx) + return c.env.Genesis(ctx) } func (c *Local) 
GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { - return c.env.GenesisChunked(c.ctx, id) + return c.env.GenesisChunked(ctx, id) } func (c *Local) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { - return c.env.Block(c.ctx, height) + return c.env.Block(ctx, height) } func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { - return c.env.BlockByHash(c.ctx, hash) + return c.env.BlockByHash(ctx, hash) } func (c *Local) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { - return c.env.BlockResults(c.ctx, height) + return c.env.BlockResults(ctx, height) +} + +func (c *Local) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { + return c.env.Header(ctx, height) +} + +func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { + return c.env.HeaderByHash(ctx, hash) } func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { - return c.env.Commit(c.ctx, height) + return c.env.Commit(ctx, height) } -func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll - return c.env.Validators(c.ctx, height, page, perPage) +func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { + return c.env.Validators(ctx, height, page, perPage) } func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { - return c.env.Tx(c.ctx, hash, prove) + return c.env.Tx(ctx, hash, prove) } func (c *Local) TxSearch( - _ context.Context, + ctx context.Context, queryString string, prove bool, page, perPage *int, orderBy string, ) (*coretypes.ResultTxSearch, error) { - return c.env.TxSearch(c.ctx, queryString, prove, page, perPage, orderBy) + return c.env.TxSearch(ctx, 
queryString, prove, page, perPage, orderBy) } func (c *Local) BlockSearch( - _ context.Context, + ctx context.Context, queryString string, page, perPage *int, orderBy string, ) (*coretypes.ResultBlockSearch, error) { - return c.env.BlockSearch(c.ctx, queryString, page, perPage, orderBy) + return c.env.BlockSearch(ctx, queryString, page, perPage, orderBy) } func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - return c.env.BroadcastEvidence(c.ctx, ev) + return c.env.BroadcastEvidence(ctx, coretypes.Evidence{Value: ev}) } func (c *Local) Subscribe( ctx context.Context, subscriber, queryString string, - outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { + capacity ...int) (out <-chan coretypes.ResultEvent, err error) { q, err := query.New(queryString) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) } - outCap := 1 - if len(outCapacity) > 0 { - outCap = outCapacity[0] + limit, quota := 1, 0 + if len(capacity) > 0 { + limit = capacity[0] + if len(capacity) > 1 { + quota = capacity[1] + } } - var sub types.Subscription - if outCap > 0 { - sub, err = c.EventBus.Subscribe(ctx, subscriber, q, outCap) - } else { - sub, err = c.EventBus.SubscribeUnbuffered(ctx, subscriber, q) + ctx, cancel := context.WithCancel(ctx) + go func() { c.Wait(); cancel() }() + + subArgs := pubsub.SubscribeArgs{ + ClientID: subscriber, + Query: q, + Quota: quota, + Limit: limit, } + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err != nil { return nil, fmt.Errorf("failed to subscribe: %w", err) } - outc := make(chan coretypes.ResultEvent, outCap) - go c.eventsRoutine(sub, subscriber, q, outc) + outc := make(chan coretypes.ResultEvent, 1) + go c.eventsRoutine(ctx, sub, subArgs, outc) return outc, nil } func (c *Local) eventsRoutine( - sub types.Subscription, - subscriber string, - q pubsub.Query, - outc chan<- coretypes.ResultEvent) { + ctx context.Context, + sub 
eventbus.Subscription, + subArgs pubsub.SubscribeArgs, + outc chan<- coretypes.ResultEvent, +) { + qstr := subArgs.Query.String() for { - select { - case msg := <-sub.Out(): - result := coretypes.ResultEvent{ - SubscriptionID: msg.SubscriptionID(), - Query: q.String(), - Data: msg.Data(), - Events: msg.Events(), - } - - if cap(outc) == 0 { - outc <- result - } else { - select { - case outc <- result: - default: - c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) - } + msg, err := sub.Next(ctx) + if errors.Is(err, pubsub.ErrUnsubscribed) { + return // client unsubscribed + } else if err != nil { + c.Logger.Error("subscription was canceled, resubscribing", + "err", err, "query", subArgs.Query.String()) + sub = c.resubscribe(ctx, subArgs) + if sub == nil { + return // client terminated } - case <-sub.Canceled(): - if sub.Err() == pubsub.ErrUnsubscribed { - return - } - - c.Logger.Error("subscription was canceled, resubscribing...", "err", sub.Err(), "query", q.String()) - sub = c.resubscribe(subscriber, q) - if sub == nil { // client was stopped - return - } - case <-c.Quit(): - return + continue + } + outc <- coretypes.ResultEvent{ + SubscriptionID: msg.SubscriptionID(), + Query: qstr, + Data: msg.Data(), + Events: msg.Events(), } } } // Try to resubscribe with exponential backoff. 
-func (c *Local) resubscribe(subscriber string, q pubsub.Query) types.Subscription { +func (c *Local) resubscribe(ctx context.Context, subArgs pubsub.SubscribeArgs) eventbus.Subscription { attempts := 0 for { if !c.IsRunning() { return nil } - sub, err := c.EventBus.Subscribe(context.Background(), subscriber, q) + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err == nil { return sub } diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index c2e0dc3cda..ad30430980 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "io/ioutil" "os" "testing" @@ -11,28 +10,29 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" rpctest "github.com/tendermint/tendermint/rpc/test" ) -func NodeSuite(t *testing.T) (service.Service, *config.Config) { +func NodeSuite(t *testing.T, logger log.Logger) (service.Service, *config.Config) { t.Helper() ctx, cancel := context.WithCancel(context.Background()) - conf := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t.Name()) + require.NoError(t, err) // start a tendermint node in the background to test against - dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) + dir, err := os.MkdirTemp("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) require.NoError(t, err) - app := kvstore.NewPersistentKVStoreApplication(dir) + app := kvstore.NewPersistentKVStoreApplication(logger, dir) node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) require.NoError(t, err) t.Cleanup(func() { cancel() - assert.NoError(t, node.Stop()) assert.NoError(t, closer(ctx)) assert.NoError(t, app.Close()) node.Wait() diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 25fbbc05d2..18fbbf6a9b 
100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -19,7 +19,8 @@ import ( ) func TestABCIMock(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() key, value := []byte("foo"), []byte("bar") height := int64(10) @@ -46,40 +47,41 @@ func TestABCIMock(t *testing.T) { } // now, let's try to make some calls - _, err := m.ABCIInfo(context.Background()) - require.NotNil(err) - assert.Equal("foobar", err.Error()) + _, err := m.ABCIInfo(ctx) + require.Error(t, err) + assert.Equal(t, "foobar", err.Error()) // query always returns the response - _query, err := m.ABCIQueryWithOptions(context.Background(), "/", nil, client.ABCIQueryOptions{Prove: false}) + _query, err := m.ABCIQueryWithOptions(ctx, "/", nil, client.ABCIQueryOptions{Prove: false}) query := _query.Response - require.Nil(err) - require.NotNil(query) - assert.EqualValues(key, query.Key) - assert.EqualValues(value, query.Value) - assert.Equal(height, query.Height) + require.NoError(t, err) + require.NotNil(t, query) + assert.EqualValues(t, key, query.Key) + assert.EqualValues(t, value, query.Value) + assert.Equal(t, height, query.Height) // non-commit calls always return errors - _, err = m.BroadcastTxSync(context.Background(), goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) - _, err = m.BroadcastTxAsync(context.Background(), goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) + _, err = m.BroadcastTxSync(ctx, goodTx) + require.Error(t, err) + assert.Equal(t, "must commit", err.Error()) + _, err = m.BroadcastTxAsync(ctx, goodTx) + require.Error(t, err) + assert.Equal(t, "must commit", err.Error()) // commit depends on the input - _, err = m.BroadcastTxCommit(context.Background(), badTx) - require.NotNil(err) - assert.Equal("bad tx", err.Error()) - bres, err := m.BroadcastTxCommit(context.Background(), goodTx) - require.Nil(err, "%+v", err) - 
assert.EqualValues(0, bres.CheckTx.Code) - assert.EqualValues("stand", bres.CheckTx.Data) - assert.EqualValues("deliver", bres.DeliverTx.Data) + _, err = m.BroadcastTxCommit(ctx, badTx) + require.Error(t, err) + assert.Equal(t, "bad tx", err.Error()) + bres, err := m.BroadcastTxCommit(ctx, goodTx) + require.NoError(t, err, "%+v", err) + assert.EqualValues(t, 0, bres.CheckTx.Code) + assert.EqualValues(t, "stand", bres.CheckTx.Data) + assert.EqualValues(t, "deliver", bres.DeliverTx.Data) } func TestABCIRecorder(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // This mock returns errors on everything but Query m := mock.ABCIMock{ @@ -93,90 +95,92 @@ func TestABCIRecorder(t *testing.T) { } r := mock.NewABCIRecorder(m) - require.Equal(0, len(r.Calls)) + require.Equal(t, 0, len(r.Calls)) - _, err := r.ABCIInfo(context.Background()) - assert.Nil(err, "expected no err on info") + _, err := r.ABCIInfo(ctx) + assert.NoError(t, err, "expected no err on info") _, err = r.ABCIQueryWithOptions( - context.Background(), + ctx, "path", bytes.HexBytes("data"), client.ABCIQueryOptions{Prove: false}, ) - assert.NotNil(err, "expected error on query") - require.Equal(2, len(r.Calls)) + assert.Error(t, err, "expected error on query") + require.Equal(t, 2, len(r.Calls)) info := r.Calls[0] - assert.Equal("abci_info", info.Name) - assert.Nil(info.Error) - assert.Nil(info.Args) - require.NotNil(info.Response) + assert.Equal(t, "abci_info", info.Name) + assert.Nil(t, info.Error) + assert.Nil(t, info.Args) + require.NotNil(t, info.Response) ir, ok := info.Response.(*coretypes.ResultABCIInfo) - require.True(ok) - assert.Equal("data", ir.Response.Data) - assert.Equal("v0.9.9", ir.Response.Version) + require.True(t, ok) + assert.Equal(t, "data", ir.Response.Data) + assert.Equal(t, "v0.9.9", ir.Response.Version) query := r.Calls[1] - assert.Equal("abci_query", query.Name) - assert.Nil(query.Response) - 
require.NotNil(query.Error) - assert.Equal("query", query.Error.Error()) - require.NotNil(query.Args) + assert.Equal(t, "abci_query", query.Name) + assert.Nil(t, query.Response) + require.NotNil(t, query.Error) + assert.Equal(t, "query", query.Error.Error()) + require.NotNil(t, query.Args) qa, ok := query.Args.(mock.QueryArgs) - require.True(ok) - assert.Equal("path", qa.Path) - assert.EqualValues("data", qa.Data) - assert.False(qa.Prove) + require.True(t, ok) + assert.Equal(t, "path", qa.Path) + assert.EqualValues(t, "data", qa.Data) + assert.False(t, qa.Prove) // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} - _, err = r.BroadcastTxCommit(context.Background(), txs[0]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxSync(context.Background(), txs[1]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxAsync(context.Background(), txs[2]) - assert.NotNil(err, "expected err on broadcast") + _, err = r.BroadcastTxCommit(ctx, txs[0]) + assert.Error(t, err, "expected err on broadcast") + _, err = r.BroadcastTxSync(ctx, txs[1]) + assert.Error(t, err, "expected err on broadcast") + _, err = r.BroadcastTxAsync(ctx, txs[2]) + assert.Error(t, err, "expected err on broadcast") - require.Equal(5, len(r.Calls)) + require.Equal(t, 5, len(r.Calls)) bc := r.Calls[2] - assert.Equal("broadcast_tx_commit", bc.Name) - assert.Nil(bc.Response) - require.NotNil(bc.Error) - assert.EqualValues(bc.Args, txs[0]) + assert.Equal(t, "broadcast_tx_commit", bc.Name) + assert.Nil(t, bc.Response) + require.NotNil(t, bc.Error) + assert.EqualValues(t, bc.Args, txs[0]) bs := r.Calls[3] - assert.Equal("broadcast_tx_sync", bs.Name) - assert.Nil(bs.Response) - require.NotNil(bs.Error) - assert.EqualValues(bs.Args, txs[1]) + assert.Equal(t, "broadcast_tx_sync", bs.Name) + assert.Nil(t, bs.Response) + require.NotNil(t, bs.Error) + assert.EqualValues(t, bs.Args, txs[1]) ba := r.Calls[4] - assert.Equal("broadcast_tx_async", ba.Name) 
- assert.Nil(ba.Response) - require.NotNil(ba.Error) - assert.EqualValues(ba.Args, txs[2]) + assert.Equal(t, "broadcast_tx_async", ba.Name) + assert.Nil(t, ba.Response) + require.NotNil(t, ba.Error) + assert.EqualValues(t, ba.Args, txs[2]) } func TestABCIApp(t *testing.T) { - assert, require := assert.New(t), require.New(t) app := kvstore.NewApplication() m := mock.ABCIApp{app} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // get some info - info, err := m.ABCIInfo(context.Background()) - require.Nil(err) - assert.Equal(`{"size":0}`, info.Response.GetData()) + info, err := m.ABCIInfo(ctx) + require.NoError(t, err) + assert.Equal(t, `{"size":0}`, info.Response.GetData()) // add a key key, value := "foo", "bar" tx := fmt.Sprintf("%s=%s", key, value) - res, err := m.BroadcastTxCommit(context.Background(), types.Tx(tx)) - require.Nil(err) - assert.True(res.CheckTx.IsOK()) - require.NotNil(res.DeliverTx) - assert.True(res.DeliverTx.IsOK()) + res, err := m.BroadcastTxCommit(ctx, types.Tx(tx)) + require.NoError(t, err) + assert.True(t, res.CheckTx.IsOK()) + require.NotNil(t, res.DeliverTx) + assert.True(t, res.DeliverTx.IsOK()) // commit // TODO: This may not be necessary in the future @@ -186,14 +190,14 @@ func TestABCIApp(t *testing.T) { // check the key _qres, err := m.ABCIQueryWithOptions( - context.Background(), + ctx, "/key", bytes.HexBytes(key), client.ABCIQueryOptions{Prove: true}, ) qres := _qres.Response - require.Nil(err) - assert.EqualValues(value, qres.Value) + require.NoError(t, err) + assert.EqualValues(t, value, qres.Value) // XXX Check proof } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index a1a42e28db..b47ff1e765 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -22,7 +22,6 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes 
"github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -76,11 +75,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { } func (c Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { - return c.env.Status(&rpctypes.Context{}) + return c.env.Status(ctx) } func (c Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { - return c.env.ABCIInfo(&rpctypes.Context{}) + return c.env.ABCIInfo(ctx) } func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { @@ -92,69 +91,69 @@ func (c Client) ABCIQueryWithOptions( path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) + return c.env.ABCIQuery(ctx, path, data, opts.Height, opts.Prove) } func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) + return c.env.BroadcastTxCommit(ctx, tx) } func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) + return c.env.BroadcastTxAsync(ctx, tx) } func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) + return c.env.BroadcastTxSync(ctx, tx) } func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - return c.env.CheckTx(&rpctypes.Context{}, tx) + return c.env.CheckTx(ctx, tx) } func (c Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { - return c.env.NetInfo(&rpctypes.Context{}) + return c.env.NetInfo(ctx) } func (c Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { - return 
c.env.GetConsensusState(&rpctypes.Context{}) + return c.env.GetConsensusState(ctx) } func (c Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { - return c.env.DumpConsensusState(&rpctypes.Context{}) + return c.env.DumpConsensusState(ctx) } func (c Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - return c.env.ConsensusParams(&rpctypes.Context{}, height) + return c.env.ConsensusParams(ctx, height) } func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { - return c.env.Health(&rpctypes.Context{}) + return c.env.Health(ctx) } -func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll - return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) +func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { + return c.env.BlockchainInfo(ctx, minHeight, maxHeight) } func (c Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { - return c.env.Genesis(&rpctypes.Context{}) + return c.env.Genesis(ctx) } func (c Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { - return c.env.Block(&rpctypes.Context{}, height) + return c.env.Block(ctx, height) } func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { - return c.env.BlockByHash(&rpctypes.Context{}, hash) + return c.env.BlockByHash(ctx, hash) } func (c Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { - return c.env.Commit(&rpctypes.Context{}, height) + return c.env.Commit(ctx, height) } -func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll - return c.env.Validators(&rpctypes.Context{}, height, page, perPage) +func (c Client) Validators(ctx 
context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { + return c.env.Validators(ctx, height, page, perPage) } func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) + return c.env.BroadcastEvidence(ctx, coretypes.Evidence{Value: ev}) } diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 98655280e3..fb70ca9d93 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -14,7 +14,8 @@ import ( ) func TestStatus(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() m := &mock.StatusMock{ Call: mock.Call{ @@ -38,37 +39,37 @@ func TestStatus(t *testing.T) { } r := mock.NewStatusRecorder(m) - require.Equal(0, len(r.Calls)) + require.Equal(t, 0, len(r.Calls)) // make sure response works proper - status, err := r.Status(context.Background()) - require.Nil(err, "%+v", err) - assert.EqualValues("block", status.SyncInfo.LatestBlockHash) - assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) - assert.EqualValues(20, status.SyncInfo.MaxPeerBlockHeight) - assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) - assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + status, err := r.Status(ctx) + require.NoError(t, err) + assert.EqualValues(t, "block", status.SyncInfo.LatestBlockHash) + assert.EqualValues(t, 10, status.SyncInfo.LatestBlockHeight) + assert.EqualValues(t, 20, status.SyncInfo.MaxPeerBlockHeight) + assert.EqualValues(t, time.Second, status.SyncInfo.TotalSyncedTime) + assert.EqualValues(t, time.Minute, status.SyncInfo.RemainingTime) // make sure recorder works properly - require.Equal(1, len(r.Calls)) + require.Equal(t, 1, len(r.Calls)) rs := r.Calls[0] - assert.Equal("status", rs.Name) - assert.Nil(rs.Args) - assert.Nil(rs.Error) - 
require.NotNil(rs.Response) + assert.Equal(t, "status", rs.Name) + assert.Nil(t, rs.Args) + assert.Nil(t, rs.Error) + require.NotNil(t, rs.Response) st, ok := rs.Response.(*coretypes.ResultStatus) - require.True(ok) - assert.EqualValues("block", st.SyncInfo.LatestBlockHash) - assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) - assert.EqualValues(20, st.SyncInfo.MaxPeerBlockHeight) - assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) - assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + require.True(t, ok) + assert.EqualValues(t, "block", st.SyncInfo.LatestBlockHash) + assert.EqualValues(t, 10, st.SyncInfo.LatestBlockHeight) + assert.EqualValues(t, 20, st.SyncInfo.MaxPeerBlockHeight) + assert.EqualValues(t, time.Second, status.SyncInfo.TotalSyncedTime) + assert.EqualValues(t, time.Minute, status.SyncInfo.RemainingTime) - assert.EqualValues(10, st.SyncInfo.TotalSnapshots) - assert.EqualValues(time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) - assert.EqualValues(10, st.SyncInfo.SnapshotHeight) - assert.EqualValues(9, status.SyncInfo.SnapshotChunksCount) - assert.EqualValues(10, status.SyncInfo.SnapshotChunksTotal) - assert.EqualValues(9, status.SyncInfo.BackFilledBlocks) - assert.EqualValues(10, status.SyncInfo.BackFillBlocksTotal) + assert.EqualValues(t, 10, st.SyncInfo.TotalSnapshots) + assert.EqualValues(t, time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) + assert.EqualValues(t, 10, st.SyncInfo.SnapshotHeight) + assert.EqualValues(t, 9, status.SyncInfo.SnapshotChunksCount) + assert.EqualValues(t, 10, status.SyncInfo.SnapshotChunksTotal) + assert.EqualValues(t, 9, status.SyncInfo.BackFilledBlocks) + assert.EqualValues(t, 10, status.SyncInfo.BackFillBlocksTotal) } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 898f67aa6a..ffa1d1f298 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -457,6 +457,52 @@ func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) 
(*coretypes.Resu return r0, r1 } +// Header provides a mock function with given fields: ctx, height +func (_m *Client) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultHeader + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultHeader); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHeader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *Client) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultHeader + if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultHeader); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHeader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Health provides a mock function with given fields: _a0 func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { ret := _m.Called(_a0) @@ -480,20 +526,6 @@ func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { return r0, r1 } -// IsRunning provides a mock function with given fields: -func (_m *Client) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - // NetInfo provides a mock function with given fields: _a0 func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { ret := _m.Called(_a0) @@ -540,13 +572,27 @@ func (_m *Client) 
NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUncon return r0, r1 } -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() +// RemoveTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -577,20 +623,6 @@ func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { return r0, r1 } -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) @@ -667,13 +699,13 @@ func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page * return r0, r1 } -// UnconfirmedTxs provides a mock function with given fields: ctx, limit -func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(ctx, limit) +// UnconfirmedTxs provides a mock function with given fields: ctx, page, perPage +func (_m *Client) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) 
(*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(ctx, page, perPage) var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(ctx, limit) + if rf, ok := ret.Get(0).(func(context.Context, *int, *int) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(ctx, page, perPage) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) @@ -681,8 +713,8 @@ func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.Re } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { - r1 = rf(ctx, limit) + if rf, ok := ret.Get(1).(func(context.Context, *int, *int) error); ok { + r1 = rf(ctx, page, perPage) } else { r1 = ret.Error(1) } diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 38766e0478..883716c199 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/base64" + "encoding/json" "fmt" "math" "net/http" @@ -20,7 +21,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/mempool" - tmjson "github.com/tendermint/tendermint/libs/json" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" @@ -33,38 +34,39 @@ import ( "github.com/tendermint/tendermint/types" ) -func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { +func getHTTPClient(t *testing.T, logger log.Logger, conf *config.Config) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, c.Start(ctx)) - c.SetLogger(log.TestingLogger()) + c.Logger = logger 
t.Cleanup(func() { - if c.IsRunning() { - require.NoError(t, c.Stop()) - } + cancel() + require.NoError(t, c.Stop()) }) return c } -func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Duration) *rpchttp.HTTP { +func getHTTPClientWithTimeout(t *testing.T, logger log.Logger, conf *config.Config, timeout time.Duration) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - http.DefaultClient.Timeout = timeout - c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) + tclient := &http.Client{Timeout: timeout} + c, err := rpchttp.NewWithClient(rpcAddr, tclient) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, c.Start(ctx)) - c.SetLogger(log.TestingLogger()) + c.Logger = logger t.Cleanup(func() { - http.DefaultClient.Timeout = 0 - if c.IsRunning() { - require.NoError(t, c.Stop()) - } + cancel() + require.NoError(t, c.Stop()) }) return c @@ -77,12 +79,13 @@ func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client. 
node, ok := ns.(rpclocal.NodeService) require.True(t, ok) - ncl, err := rpclocal.New(node) + logger := log.NewTestingLogger(t) + ncl, err := rpclocal.New(logger, node) require.NoError(t, err) return []client.Client{ ncl, - getHTTPClient(t, conf), + getHTTPClient(t, logger, conf), } } @@ -90,7 +93,9 @@ func TestClientOperations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, conf := NodeSuite(t) + logger := log.NewTestingLogger(t) + + _, conf := NodeSuite(t, logger) t.Run("NilCustomHTTPClient", func(t *testing.T) { _, err := rpchttp.NewWithClient("http://example.com", nil) @@ -108,7 +113,7 @@ func TestClientOperations(t *testing.T) { t.Run("CustomHTTPClient", func(t *testing.T) { remote := conf.RPC.ListenAddress c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) + require.NoError(t, err) status, err := c.Status(ctx) require.NoError(t, err) require.NotNil(t, status) @@ -118,24 +123,26 @@ func TestClientOperations(t *testing.T) { remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") req, err := http.NewRequestWithContext(ctx, "GET", remote, nil) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) req.Header.Set("Origin", origin) resp, err := http.DefaultClient.Do(req) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) defer resp.Body.Close() assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) }) t.Run("Batching", func(t *testing.T) { t.Run("JSONRPCCalls", func(t *testing.T) { - c := getHTTPClient(t, conf) + logger := log.NewTestingLogger(t) + c := getHTTPClient(t, logger, conf) testBatchedJSONRPCCalls(ctx, t, c) }) t.Run("JSONRPCCallsCancellation", func(t *testing.T) { _, _, tx1 := MakeTxKV() _, _, tx2 := MakeTxKV() - c := getHTTPClient(t, conf) + logger := log.NewTestingLogger(t) + c := getHTTPClient(t, logger, conf) batch := c.NewBatch() _, err := batch.BroadcastTxCommit(ctx, tx1) require.NoError(t, err) @@ -149,19 
+156,25 @@ func TestClientOperations(t *testing.T) { require.Equal(t, 0, batch.Count()) }) t.Run("SendingEmptyRequest", func(t *testing.T) { - c := getHTTPClient(t, conf) + logger := log.NewTestingLogger(t) + + c := getHTTPClient(t, logger, conf) batch := c.NewBatch() _, err := batch.Send(ctx) require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") }) t.Run("ClearingEmptyRequest", func(t *testing.T) { - c := getHTTPClient(t, conf) + logger := log.NewTestingLogger(t) + + c := getHTTPClient(t, logger, conf) batch := c.NewBatch() require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") }) t.Run("ConcurrentJSONRPC", func(t *testing.T) { + logger := log.NewTestingLogger(t) + var wg sync.WaitGroup - c := getHTTPClient(t, conf) + c := getHTTPClient(t, logger, conf) for i := 0; i < 50; i++ { wg.Add(1) go func() { @@ -172,30 +185,15 @@ func TestClientOperations(t *testing.T) { wg.Wait() }) }) - t.Run("HTTPReturnsErrorIfClientIsNotRunning", func(t *testing.T) { - c := getHTTPClientWithTimeout(t, conf, 100*time.Millisecond) - - // on Subscribe - _, err := c.Subscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on Unsubscribe - err = c.Unsubscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on UnsubscribeAll - err = c.UnsubscribeAll(ctx, "TestHeaderEvents") - assert.Error(t, err) - }) } // Make sure info is correct (we connect properly) func TestClientMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) + logger := log.NewTestingLogger(t) + + n, conf := NodeSuite(t, logger) // for broadcast tx tests pool := getMempool(t, n) @@ -208,7 +206,7 @@ func TestClientMethodCalls(t *testing.T) { t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { t.Run("Status", func(t *testing.T) { 
status, err := c.Status(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.Equal(t, conf.Moniker, status.NodeInfo.Moniker) }) t.Run("Info", func(t *testing.T) { @@ -225,7 +223,7 @@ func TestClientMethodCalls(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) netinfo, err := nc.NetInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.True(t, netinfo.Listening) assert.Equal(t, 0, len(netinfo.Peers)) }) @@ -234,7 +232,7 @@ func TestClientMethodCalls(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) cons, err := nc.DumpConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.NotEmpty(t, cons.RoundState) assert.Empty(t, cons.Peers) }) @@ -243,19 +241,19 @@ func TestClientMethodCalls(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) cons, err := nc.ConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.NotEmpty(t, cons.RoundState) }) t.Run("Health", func(t *testing.T) { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) _, err := nc.Health(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) }) t.Run("GenesisAndValidators", func(t *testing.T) { // make sure this is the right genesis file gen, err := c.Genesis(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) // get the genesis validator require.Equal(t, 1, len(gen.Genesis.Validators)) gval := gen.Genesis.Validators[0] @@ -263,7 +261,7 @@ func TestClientMethodCalls(t *testing.T) { // get the current validators h := int64(1) vals, err := c.Validators(ctx, &h, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) require.Equal(t, 1, len(vals.Validators)) require.Equal(t, 1, vals.Count) 
require.Equal(t, 1, vals.Total) @@ -289,7 +287,7 @@ func TestClientMethodCalls(t *testing.T) { doc := []byte(strings.Join(decoded, "")) var out types.GenesisDoc - require.NoError(t, tmjson.Unmarshal(doc, &out), + require.NoError(t, json.Unmarshal(doc, &out), "first: %+v, doc: %s", first, string(doc)) }) t.Run("ABCIQuery", func(t *testing.T) { @@ -302,7 +300,7 @@ func TestClientMethodCalls(t *testing.T) { apph := status.SyncInfo.LatestBlockHeight + 2 // this is where the tx will be applied to the state // wait before querying - err = client.WaitForHeight(c, apph, nil) + err = client.WaitForHeight(ctx, c, apph, nil) require.NoError(t, err) res, err := c.ABCIQuery(ctx, "/key", k) qres := res.Response @@ -331,7 +329,7 @@ func TestClientMethodCalls(t *testing.T) { apph := txh + 1 // this is where the tx will be applied to the state // wait before querying - err = client.WaitForHeight(c, apph, nil) + err = client.WaitForHeight(ctx, c, apph, nil) require.NoError(t, err) _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) @@ -359,6 +357,15 @@ func TestClientMethodCalls(t *testing.T) { require.NoError(t, err) require.Equal(t, block, blockByHash) + // check that the header matches the block hash + header, err := c.Header(ctx, &apph) + require.NoError(t, err) + require.Equal(t, block.Block.Header, *header.Header) + + headerByHash, err := c.HeaderByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, header, headerByHash) + // now check the results blockResults, err := c.BlockResults(ctx, &txh) require.NoError(t, err, "%d: %+v", i, err) @@ -402,21 +409,24 @@ func TestClientMethodCalls(t *testing.T) { // XXX Test proof }) t.Run("BlockchainInfo", func(t *testing.T) { - err := client.WaitForHeight(c, 10, nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := client.WaitForHeight(ctx, c, 10, nil) require.NoError(t, err) res, err := c.BlockchainInfo(ctx, 0, 0) - require.Nil(t, err, 
"%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.True(t, res.LastHeight > 0) assert.True(t, len(res.BlockMetas) > 0) res, err = c.BlockchainInfo(ctx, 1, 1) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.True(t, res.LastHeight > 0) assert.True(t, len(res.BlockMetas) == 1) res, err = c.BlockchainInfo(ctx, 1, 10000) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.True(t, res.LastHeight > 0) assert.True(t, len(res.BlockMetas) < 100) for _, m := range res.BlockMetas { @@ -424,14 +434,14 @@ func TestClientMethodCalls(t *testing.T) { } res, err = c.BlockchainInfo(ctx, 10000, 1) - require.NotNil(t, err) + require.Error(t, err) assert.Nil(t, res) assert.Contains(t, err.Error(), "can't be greater than max") }) t.Run("BroadcastTxCommit", func(t *testing.T) { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) require.True(t, bres.CheckTx.IsOK()) require.True(t, bres.DeliverTx.IsOK()) @@ -441,7 +451,7 @@ func TestClientMethodCalls(t *testing.T) { _, _, tx := MakeTxKV() initMempoolSize := pool.Size() bres, err := c.BroadcastTxSync(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME require.Equal(t, initMempoolSize+1, pool.Size()) @@ -460,21 +470,9 @@ func TestClientMethodCalls(t *testing.T) { assert.Equal(t, 0, pool.Size(), "mempool must be empty") }) t.Run("Events", func(t *testing.T) { - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. 
- err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - t.Run("Header", func(t *testing.T) { evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) _, ok := evt.(types.EventDataNewBlockHeader) require.True(t, ok, "%d: %#v", i, evt) // TODO: more checks... @@ -513,13 +511,18 @@ func TestClientMethodCalls(t *testing.T) { }) }) t.Run("Evidence", func(t *testing.T) { - t.Run("BraodcastDuplicateVote", func(t *testing.T) { - chainID := conf.ChainID() + t.Run("BroadcastDuplicateVote", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - correct, fakes := makeEvidences(t, pv, chainID) + chainID := conf.ChainID() // make sure that the node has produced enough blocks waitForBlock(ctx, t, c, 2) + evidenceHeight := int64(1) + block, _ := c.Block(ctx, &evidenceHeight) + ts := block.Block.Time + correct, fakes := makeEvidences(t, pv, chainID, ts) result, err := c.BroadcastEvidence(ctx, correct) require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) @@ -527,7 +530,7 @@ func TestClientMethodCalls(t *testing.T) { status, err := c.Status(ctx) require.NoError(t, err) - err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) + err = client.WaitForHeight(ctx, c, status.SyncInfo.LatestBlockHeight+2, nil) require.NoError(t, err) ed25519pub := pv.Key.PubKey.(ed25519.PubKey) @@ -551,11 +554,10 @@ func TestClientMethodCalls(t *testing.T) { _, err := c.BroadcastEvidence(ctx, fake) require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) } - }) t.Run("BroadcastEmpty", func(t *testing.T) { _, err := c.BroadcastEvidence(ctx, nil) - assert.Error(t, err) + require.Error(t, err) }) }) }) @@ -565,10 +567,10 @@ func TestClientMethodCalls(t *testing.T) { func getMempool(t *testing.T, srv service.Service) 
mempool.Mempool { t.Helper() n, ok := srv.(interface { - Mempool() mempool.Mempool + RPCEnvironment() *rpccore.Environment }) require.True(t, ok) - return n.Mempool() + return n.RPCEnvironment().Mempool } // these cases are roughly the same as the TestClientMethodCalls, but @@ -579,33 +581,50 @@ func TestClientMethodCallsAdvanced(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) + logger := log.NewTestingLogger(t) + + n, conf := NodeSuite(t, logger) pool := getMempool(t, n) t.Run("UnconfirmedTxs", func(t *testing.T) { - _, _, tx := MakeTxKV() - ch := make(chan struct{}) + // populate mempool with 5 tx + txs := make([]types.Tx, 5) + ch := make(chan error, 5) + for i := 0; i < 5; i++ { + _, _, tx := MakeTxKV() - err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) - require.NoError(t, err) + txs[i] = tx + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { ch <- nil }, mempool.TxInfo{}) + require.NoError(t, err) + } // wait for tx to arrive in mempoool. 
- select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") + for i := 0; i < 5; i++ { + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } } + close(ch) for _, c := range GetClients(t, n, conf) { - mc := c.(client.MempoolClient) - limit := 1 - res, err := mc.UnconfirmedTxs(ctx, &limit) - require.NoError(t, err) + for i := 1; i <= 2; i++ { + mc := c.(client.MempoolClient) + page, perPage := i, 3 + res, err := mc.UnconfirmedTxs(ctx, &page, &perPage) + require.NoError(t, err) - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) + if i == 2 { + perPage = 2 + } + assert.Equal(t, perPage, res.Count) + assert.Equal(t, 5, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + for _, tx := range res.Txs { + assert.Contains(t, txs, tx) + } + } } pool.Flush() @@ -632,7 +651,7 @@ func TestClientMethodCallsAdvanced(t *testing.T) { mc, ok := c.(client.MempoolClient) require.True(t, ok, "%d", i) res, err := mc.NumUnconfirmedTxs(ctx) - require.Nil(t, err, "%d: %+v", i, err) + require.NoError(t, err, "%d: %+v", i, err) assert.Equal(t, mempoolSize, res.Count) assert.Equal(t, mempoolSize, res.Total) @@ -642,12 +661,14 @@ func TestClientMethodCallsAdvanced(t *testing.T) { pool.Flush() }) t.Run("Tx", func(t *testing.T) { - c := getHTTPClient(t, conf) + logger := log.NewTestingLogger(t) + + c := getHTTPClient(t, logger, conf) // first we broadcast a tx _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%+v", err) + require.NoError(t, err, "%+v", err) txHeight := bres.Height txHash := bres.Hash @@ -677,9 +698,9 @@ func TestClientMethodCallsAdvanced(t *testing.T) { ptx, err := c.Tx(ctx, tc.hash, tc.prove) if !tc.valid { - require.NotNil(t, err) + require.Error(t, err) } else { - require.Nil(t, err, "%+v", 
err) + require.NoError(t, err, "%+v", err) assert.EqualValues(t, txHeight, ptx.Height) assert.EqualValues(t, tx, ptx.Tx) assert.Zero(t, ptx.Index) @@ -698,7 +719,9 @@ func TestClientMethodCallsAdvanced(t *testing.T) { } }) t.Run("TxSearchWithTimeout", func(t *testing.T) { - timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) + logger := log.NewTestingLogger(t) + + timeoutClient := getHTTPClientWithTimeout(t, logger, conf, 10*time.Second) _, _, tx := MakeTxKV() _, err := timeoutClient.BroadcastTxCommit(ctx, tx) @@ -706,12 +729,14 @@ func TestClientMethodCallsAdvanced(t *testing.T) { // query using a compositeKey (see kvstore application) result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") }) t.Run("TxSearch", func(t *testing.T) { t.Skip("Test Asserts Non-Deterministic Results") - c := getHTTPClient(t, conf) + logger := log.NewTestingLogger(t) + + c := getHTTPClient(t, logger, conf) // first we broadcast a few txs for i := 0; i < 10; i++ { @@ -732,10 +757,9 @@ func TestClientMethodCallsAdvanced(t *testing.T) { for _, c := range GetClients(t, n, conf) { t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { - // now we query for the tx. 
result, err := c.TxSearch(ctx, fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Len(t, result.Txs, 1) require.Equal(t, find.Hash, result.Txs[0].Hash) @@ -753,51 +777,51 @@ func TestClientMethodCallsAdvanced(t *testing.T) { // query by height result, err = c.TxSearch(ctx, fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Len(t, result.Txs, 1) // query for non existing tx result, err = c.TxSearch(ctx, fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Len(t, result.Txs, 0) // query using a compositeKey (see kvstore application) result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") // query using an index key result, err = c.TxSearch(ctx, "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") // query using an noindex key result, err = c.TxSearch(ctx, "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") // query using a compositeKey (see kvstore application) and height result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") // query a non existing tx with page 1 and txsPerPage 1 perPage := 1 result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) + require.NoError(t, err) require.Len(t, result.Txs, 0) // check sorting result, 
err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) + require.NoError(t, err) for k := 0; k < len(result.Txs)-1; k++ { require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) } result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) + require.NoError(t, err) for k := 0; k < len(result.Txs)-1; k++ { require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) @@ -858,7 +882,7 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, *bresult2, *r2) apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1 - err = client.WaitForHeight(c, apph, nil) + err = client.WaitForHeight(ctx, c, apph, nil) require.NoError(t, err) q1, err := batch.ABCIQuery(ctx, "/key", k1) diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index ecb058312d..a25d348e35 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -7,6 +7,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -26,7 +27,7 @@ var ( // List of blocks type ResultBlockchainInfo struct { - LastHeight int64 `json:"last_height"` + LastHeight int64 `json:"last_height,string"` BlockMetas []*types.BlockMeta `json:"block_metas"` } @@ -40,8 +41,8 @@ type ResultGenesis struct { // document to JSON and then splitting the resulting payload into // 16 megabyte blocks and then base64 encoding each block. 
type ResultGenesisChunk struct { - ChunkNumber int `json:"chunk"` - TotalChunks int `json:"total"` + ChunkNumber int `json:"chunk,string"` + TotalChunks int `json:"total,string"` Data string `json:"data"` } @@ -51,6 +52,11 @@ type ResultBlock struct { Block *types.Block `json:"block"` } +// ResultHeader represents the response for a Header RPC Client query +type ResultHeader struct { + Header *types.Header `json:"header"` +} + // Commit and Header type ResultCommit struct { types.SignedHeader `json:"signed_header"` @@ -59,9 +65,9 @@ type ResultCommit struct { // ABCI results from a block type ResultBlockResults struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` TxsResults []*abci.ResponseDeliverTx `json:"txs_results"` - TotalGasUsed int64 `json:"total_gas_used"` + TotalGasUsed int64 `json:"total_gas_used,string"` BeginBlockEvents []abci.Event `json:"begin_block_events"` EndBlockEvents []abci.Event `json:"end_block_events"` ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` @@ -86,42 +92,77 @@ func NewResultCommit(header *types.Header, commit *types.Commit, type SyncInfo struct { LatestBlockHash bytes.HexBytes `json:"latest_block_hash"` LatestAppHash bytes.HexBytes `json:"latest_app_hash"` - LatestBlockHeight int64 `json:"latest_block_height"` + LatestBlockHeight int64 `json:"latest_block_height,string"` LatestBlockTime time.Time `json:"latest_block_time"` EarliestBlockHash bytes.HexBytes `json:"earliest_block_hash"` EarliestAppHash bytes.HexBytes `json:"earliest_app_hash"` - EarliestBlockHeight int64 `json:"earliest_block_height"` + EarliestBlockHeight int64 `json:"earliest_block_height,string"` EarliestBlockTime time.Time `json:"earliest_block_time"` - MaxPeerBlockHeight int64 `json:"max_peer_block_height"` + MaxPeerBlockHeight int64 `json:"max_peer_block_height,string"` CatchingUp bool `json:"catching_up"` - TotalSyncedTime time.Duration `json:"total_synced_time"` - RemainingTime time.Duration `json:"remaining_time"` + 
TotalSyncedTime time.Duration `json:"total_synced_time,string"` + RemainingTime time.Duration `json:"remaining_time,string"` + + TotalSnapshots int64 `json:"total_snapshots,string"` + ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time,string"` + SnapshotHeight int64 `json:"snapshot_height,string"` + SnapshotChunksCount int64 `json:"snapshot_chunks_count,string"` + SnapshotChunksTotal int64 `json:"snapshot_chunks_total,string"` + BackFilledBlocks int64 `json:"backfilled_blocks,string"` + BackFillBlocksTotal int64 `json:"backfill_blocks_total,string"` +} - TotalSnapshots int64 `json:"total_snapshots"` - ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time"` - SnapshotHeight int64 `json:"snapshot_height"` - SnapshotChunksCount int64 `json:"snapshot_chunks_count"` - SnapshotChunksTotal int64 `json:"snapshot_chunks_total"` - BackFilledBlocks int64 `json:"backfilled_blocks"` - BackFillBlocksTotal int64 `json:"backfill_blocks_total"` +type ApplicationInfo struct { + Version string `json:"version"` } // Info about the node's validator type ValidatorInfo struct { - Address bytes.HexBytes `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - VotingPower int64 `json:"voting_power"` + Address bytes.HexBytes + PubKey crypto.PubKey + VotingPower int64 +} + +type validatorInfoJSON struct { + Address bytes.HexBytes `json:"address"` + PubKey json.RawMessage `json:"pub_key"` + VotingPower int64 `json:"voting_power,string"` +} + +func (v ValidatorInfo) MarshalJSON() ([]byte, error) { + pk, err := jsontypes.Marshal(v.PubKey) + if err != nil { + return nil, err + } + return json.Marshal(validatorInfoJSON{ + Address: v.Address, PubKey: pk, VotingPower: v.VotingPower, + }) +} + +func (v *ValidatorInfo) UnmarshalJSON(data []byte) error { + var val validatorInfoJSON + if err := json.Unmarshal(data, &val); err != nil { + return err + } + if err := jsontypes.Unmarshal(val.PubKey, &v.PubKey); err != nil { + return err + } + v.Address = val.Address + v.VotingPower 
= val.VotingPower + return nil } // Node Status type ResultStatus struct { - NodeInfo types.NodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` + NodeInfo types.NodeInfo `json:"node_info"` + ApplicationInfo ApplicationInfo `json:"application_info,omitempty"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` + LightClientInfo types.LightClientInfo `json:"light_client_info,omitempty"` } // Is TxIndexing enabled @@ -136,7 +177,7 @@ func (s *ResultStatus) TxIndexEnabled() bool { type ResultNetInfo struct { Listening bool `json:"listening"` Listeners []string `json:"listeners"` - NPeers int `json:"n_peers"` + NPeers int `json:"n_peers,string"` Peers []Peer `json:"peers"` } @@ -158,12 +199,11 @@ type Peer struct { // Validators for a height. type ResultValidators struct { - BlockHeight int64 `json:"block_height"` + BlockHeight int64 `json:"block_height,string"` Validators []*types.Validator `json:"validators"` - // Count of actual validators in this result - Count int `json:"count"` - // Total number of validators - Total int `json:"total"` + + Count int `json:"count,string"` // Count of actual validators in this result + Total int `json:"total,string"` // Total number of validators } // ConsensusParams for given height @@ -197,8 +237,7 @@ type ResultBroadcastTx struct { Log string `json:"log"` Codespace string `json:"codespace"` MempoolError string `json:"mempool_error"` - - Hash bytes.HexBytes `json:"hash"` + Hash bytes.HexBytes `json:"hash"` } // CheckTx and DeliverTx results @@ -206,7 +245,7 @@ type ResultBroadcastTxCommit struct { CheckTx abci.ResponseCheckTx `json:"check_tx"` DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` } // ResultCheckTx wraps abci.ResponseCheckTx. 
@@ -217,7 +256,7 @@ type ResultCheckTx struct { // Result of querying for a tx type ResultTx struct { Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` Index uint32 `json:"index"` TxResult abci.ResponseDeliverTx `json:"tx_result"` Tx types.Tx `json:"tx"` @@ -227,20 +266,20 @@ type ResultTx struct { // Result of searching for txs type ResultTxSearch struct { Txs []*ResultTx `json:"txs"` - TotalCount int `json:"total_count"` + TotalCount int `json:"total_count,string"` } // ResultBlockSearch defines the RPC response type for a block search by events. type ResultBlockSearch struct { Blocks []*ResultBlock `json:"blocks"` - TotalCount int `json:"total_count"` + TotalCount int `json:"total_count,string"` } // List of mempool txs type ResultUnconfirmedTxs struct { - Count int `json:"n_txs"` - Total int `json:"total"` - TotalBytes int64 `json:"total_bytes"` + Count int `json:"n_txs,string"` + Total int `json:"total,string"` + TotalBytes int64 `json:"total_bytes,string"` Txs []types.Tx `json:"txs"` } @@ -270,8 +309,51 @@ type ( // Event data from a subscription type ResultEvent struct { - SubscriptionID string `json:"subscription_id"` - Query string `json:"query"` - Data types.TMEventData `json:"data"` - Events []abci.Event `json:"events"` + SubscriptionID string + Query string + Data types.EventData + Events []abci.Event +} + +type resultEventJSON struct { + SubscriptionID string `json:"subscription_id"` + Query string `json:"query"` + Data json.RawMessage `json:"data"` + Events []abci.Event `json:"events"` } + +func (r ResultEvent) MarshalJSON() ([]byte, error) { + evt, err := jsontypes.Marshal(r.Data) + if err != nil { + return nil, err + } + return json.Marshal(resultEventJSON{ + SubscriptionID: r.SubscriptionID, + Query: r.Query, + Data: evt, + Events: r.Events, + }) +} + +func (r *ResultEvent) UnmarshalJSON(data []byte) error { + var res resultEventJSON + if err := json.Unmarshal(data, &res); err != nil { + return 
err + } + if err := jsontypes.Unmarshal(res.Data, &r.Data); err != nil { + return err + } + r.SubscriptionID = res.SubscriptionID + r.Query = res.Query + r.Events = res.Events + return nil +} + +// Evidence is an argument wrapper for a types.Evidence value, that handles +// encoding and decoding through JSON. +type Evidence struct { + Value types.Evidence +} + +func (e Evidence) MarshalJSON() ([]byte, error) { return jsontypes.Marshal(e.Value) } +func (e *Evidence) UnmarshalJSON(data []byte) error { return jsontypes.Unmarshal(data, &e.Value) } diff --git a/rpc/jsonrpc/client/args_test.go b/rpc/jsonrpc/client/args_test.go deleted file mode 100644 index 2506f30734..0000000000 --- a/rpc/jsonrpc/client/args_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package client - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type Tx []byte - -type Foo struct { - Bar int - Baz string -} - -func TestArgToJSON(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - - cases := []struct { - input interface{} - expected string - }{ - {[]byte("1234"), "0x31323334"}, - {Tx("654"), "0x363534"}, - {Foo{7, "hello"}, `{"Bar":"7","Baz":"hello"}`}, - } - - for i, tc := range cases { - args := map[string]interface{}{"data": tc.input} - err := argsToJSON(args) - require.Nil(err, "%d: %+v", i, err) - require.Equal(1, len(args), "%d", i) - data, ok := args["data"].(string) - require.True(ok, "%d: %#v", i, args["data"]) - assert.Equal(tc.expected, data, "%d", i) - } -} diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go index f69926cb74..2babcf70c3 100644 --- a/rpc/jsonrpc/client/decode.go +++ b/rpc/jsonrpc/client/decode.go @@ -2,125 +2,69 @@ package client import ( "encoding/json" - "errors" "fmt" - tmjson "github.com/tendermint/tendermint/libs/json" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -func unmarshalResponseBytes( - responseBytes []byte, - expectedID rpctypes.JSONRPCIntID, - 
result interface{}, -) (interface{}, error) { - +func unmarshalResponseBytes(responseBytes []byte, expectedID string, result interface{}) error { // Read response. If rpc/core/types is imported, the result will unmarshal // into the correct type. - response := &rpctypes.RPCResponse{} - if err := json.Unmarshal(responseBytes, response); err != nil { - return nil, fmt.Errorf("error unmarshaling: %w", err) + var response rpctypes.RPCResponse + if err := json.Unmarshal(responseBytes, &response); err != nil { + return fmt.Errorf("unmarshaling response: %w", err) } if response.Error != nil { - return nil, response.Error + return response.Error } - if err := validateAndVerifyID(response, expectedID); err != nil { - return nil, fmt.Errorf("wrong ID: %w", err) + if got := response.ID(); got != expectedID { + return fmt.Errorf("got response ID %q, wanted %q", got, expectedID) } // Unmarshal the RawMessage into the result. - if err := tmjson.Unmarshal(response.Result, result); err != nil { - return nil, fmt.Errorf("error unmarshaling result: %w", err) + if err := json.Unmarshal(response.Result, result); err != nil { + return fmt.Errorf("error unmarshaling result: %w", err) } - - return result, nil + return nil } -func unmarshalResponseBytesArray( - responseBytes []byte, - expectedIDs []rpctypes.JSONRPCIntID, - results []interface{}, -) ([]interface{}, error) { - - var ( - responses []rpctypes.RPCResponse - ) - +func unmarshalResponseBytesArray(responseBytes []byte, expectedIDs []string, results []interface{}) error { + var responses []rpctypes.RPCResponse if err := json.Unmarshal(responseBytes, &responses); err != nil { - return nil, fmt.Errorf("error unmarshaling: %w", err) - } - - // No response error checking here as there may be a mixture of successful - // and unsuccessful responses. 
- - if len(results) != len(responses) { - return nil, fmt.Errorf( - "expected %d result objects into which to inject responses, but got %d", - len(responses), - len(results), - ) + return fmt.Errorf("unmarshaling responses: %w", err) + } else if len(responses) != len(results) { + return fmt.Errorf("got %d results, wanted %d", len(responses), len(results)) } // Intersect IDs from responses with expectedIDs. - ids := make([]rpctypes.JSONRPCIntID, len(responses)) - var ok bool + ids := make([]string, len(responses)) for i, resp := range responses { - ids[i], ok = resp.ID.(rpctypes.JSONRPCIntID) - if !ok { - return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) - } + ids[i] = resp.ID() } if err := validateResponseIDs(ids, expectedIDs); err != nil { - return nil, fmt.Errorf("wrong IDs: %w", err) + return fmt.Errorf("wrong IDs: %w", err) } - for i := 0; i < len(responses); i++ { - if err := tmjson.Unmarshal(responses[i].Result, results[i]); err != nil { - return nil, fmt.Errorf("error unmarshaling #%d result: %w", i, err) + for i, resp := range responses { + if err := json.Unmarshal(resp.Result, results[i]); err != nil { + return fmt.Errorf("unmarshaling result %d: %w", i, err) } } - - return results, nil + return nil } -func validateResponseIDs(ids, expectedIDs []rpctypes.JSONRPCIntID) error { - m := make(map[rpctypes.JSONRPCIntID]bool, len(expectedIDs)) - for _, expectedID := range expectedIDs { - m[expectedID] = true +func validateResponseIDs(ids, expectedIDs []string) error { + m := make(map[string]struct{}, len(expectedIDs)) + for _, id := range expectedIDs { + m[id] = struct{}{} } for i, id := range ids { - if m[id] { - delete(m, id) - } else { - return fmt.Errorf("unsolicited ID #%d: %v", i, id) + if _, ok := m[id]; !ok { + return fmt.Errorf("unexpected response ID %d: %q", i, id) } } - - return nil -} - -// From the JSON-RPC 2.0 spec: -// id: It MUST be the same as the value of the id member in the Request Object. 
-func validateAndVerifyID(res *rpctypes.RPCResponse, expectedID rpctypes.JSONRPCIntID) error { - if err := validateResponseID(res.ID); err != nil { - return err - } - if expectedID != res.ID.(rpctypes.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type - return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) - } - return nil -} - -func validateResponseID(id interface{}) error { - if id == nil { - return errors.New("no ID") - } - _, ok := id.(rpctypes.JSONRPCIntID) - if !ok { - return fmt.Errorf("expected JSONRPCIntID, but got: %T", id) - } return nil } diff --git a/rpc/jsonrpc/client/encode.go b/rpc/jsonrpc/client/encode.go deleted file mode 100644 index e085f51a24..0000000000 --- a/rpc/jsonrpc/client/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "fmt" - "net/url" - "reflect" - - tmjson "github.com/tendermint/tendermint/libs/json" -) - -func argsToURLValues(args map[string]interface{}) (url.Values, error) { - values := make(url.Values) - if len(args) == 0 { - return values, nil - } - - err := argsToJSON(args) - if err != nil { - return nil, err - } - - for key, val := range args { - values.Set(key, val.(string)) - } - - return values, nil -} - -func argsToJSON(args map[string]interface{}) error { - for k, v := range args { - rt := reflect.TypeOf(v) - isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 - if isByteSlice { - bytes := reflect.ValueOf(v).Bytes() - args[k] = fmt.Sprintf("0x%X", bytes) - continue - } - - data, err := tmjson.Marshal(v) - if err != nil { - return err - } - args[k] = string(data) - } - return nil -} diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 7733eb00cd..c1cad7097e 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -6,14 +6,14 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" "strings" + "sync" 
"time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -106,15 +106,15 @@ func (u parsedURL) GetTrimmedURL() string { //------------------------------------------------------------- -// HTTPClient is a common interface for JSON-RPC HTTP clients. -type HTTPClient interface { - // Call calls the given method with the params and returns a result. - Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) -} - -// Caller implementers can facilitate calling the JSON-RPC endpoint. +// A Caller handles the round trip of a single JSON-RPC request. The +// implementation is responsible for assigning request IDs, marshaling +// parameters, and unmarshaling results. type Caller interface { - Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) + // Call sends a new request for method to the server with the given + // parameters. If params == nil, the request has empty parameters. + // If result == nil, any result value must be discarded without error. + // Otherwise the concrete value of result must be a pointer. + Call(ctx context.Context, method string, params, result interface{}) error } //------------------------------------------------------------- @@ -130,12 +130,10 @@ type Client struct { client *http.Client - mtx tmsync.Mutex + mtx sync.Mutex nextReqID int } -var _ HTTPClient = (*Client)(nil) - // Both Client and RequestBatch can facilitate calls to the JSON // RPC endpoint. var _ Caller = (*Client)(nil) @@ -152,8 +150,8 @@ func New(remote string) (*Client, error) { } // NewWithHTTPClient returns a Client pointed at the given address using a -// custom http client. An error is returned on invalid remote. The function -// panics when client is nil. +// custom HTTP client. It reports an error if c == nil or if remote is not a +// valid URL. 
func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) { if c == nil { return nil, errors.New("nil client") @@ -182,28 +180,23 @@ func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) { // Call issues a POST HTTP request. Requests are JSON encoded. Content-Type: // application/json. -func (c *Client) Call( - ctx context.Context, - method string, - params map[string]interface{}, - result interface{}, -) (interface{}, error) { +func (c *Client) Call(ctx context.Context, method string, params, result interface{}) error { id := c.nextRequestID() - request, err := rpctypes.MapToRequest(id, method, params) - if err != nil { - return nil, fmt.Errorf("failed to encode params: %w", err) + request := rpctypes.NewRequest(id) + if err := request.SetMethodAndParams(method, params); err != nil { + return fmt.Errorf("failed to encode params: %w", err) } requestBytes, err := json.Marshal(request) if err != nil { - return nil, fmt.Errorf("failed to marshal request: %w", err) + return fmt.Errorf("failed to marshal request: %w", err) } requestBuf := bytes.NewBuffer(requestBytes) httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, requestBuf) if err != nil { - return nil, fmt.Errorf("request setup failed: %w", err) + return fmt.Errorf("request setup failed: %w", err) } httpRequest.Header.Set("Content-Type", "application/json") @@ -214,17 +207,16 @@ func (c *Client) Call( httpResponse, err := c.client.Do(httpRequest) if err != nil { - return nil, err + return err } - defer httpResponse.Body.Close() - - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) + httpResponse.Body.Close() if err != nil { - return nil, fmt.Errorf("failed to read response body: %w", err) + return fmt.Errorf("reading response body: %w", err) } - return unmarshalResponseBytes(responseBytes, id, result) + return unmarshalResponseBytes(responseBytes, request.ID(), result) } // NewRequestBatch starts 
a batch of requests for this client. @@ -265,28 +257,30 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque return nil, fmt.Errorf("post: %w", err) } - defer httpResponse.Body.Close() - - responseBytes, err := ioutil.ReadAll(httpResponse.Body) + responseBytes, err := io.ReadAll(httpResponse.Body) + httpResponse.Body.Close() if err != nil { - return nil, fmt.Errorf("read response body: %w", err) + return nil, fmt.Errorf("reading response body: %w", err) } // collect ids to check responses IDs in unmarshalResponseBytesArray - ids := make([]rpctypes.JSONRPCIntID, len(requests)) + ids := make([]string, len(requests)) for i, req := range requests { - ids[i] = req.request.ID.(rpctypes.JSONRPCIntID) + ids[i] = req.request.ID() } - return unmarshalResponseBytesArray(responseBytes, ids, results) + if err := unmarshalResponseBytesArray(responseBytes, ids, results); err != nil { + return nil, err + } + return results, nil } -func (c *Client) nextRequestID() rpctypes.JSONRPCIntID { +func (c *Client) nextRequestID() int { c.mtx.Lock() + defer c.mtx.Unlock() id := c.nextReqID c.nextReqID++ - c.mtx.Unlock() - return rpctypes.JSONRPCIntID(id) + return id } //------------------------------------------------------------------------------------ @@ -304,7 +298,7 @@ type jsonRPCBufferedRequest struct { type RequestBatch struct { client *Client - mtx tmsync.Mutex + mtx sync.Mutex requests []*jsonRPCBufferedRequest } @@ -348,19 +342,13 @@ func (b *RequestBatch) Send(ctx context.Context) ([]interface{}, error) { // Call enqueues a request to call the given RPC method with the specified // parameters, in the same way that the `Client.Call` function would. 
-func (b *RequestBatch) Call( - _ context.Context, - method string, - params map[string]interface{}, - result interface{}, -) (interface{}, error) { - id := b.client.nextRequestID() - request, err := rpctypes.MapToRequest(id, method, params) - if err != nil { - return nil, err +func (b *RequestBatch) Call(_ context.Context, method string, params, result interface{}) error { + request := rpctypes.NewRequest(b.client.nextRequestID()) + if err := request.SetMethodAndParams(method, params); err != nil { + return err } b.enqueue(&jsonRPCBufferedRequest{request: request, result: result}) - return result, nil + return nil } //------------------------------------------------------------- diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go index 5a03af5125..d6b455d63b 100644 --- a/rpc/jsonrpc/client/http_json_client_test.go +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -1,7 +1,7 @@ package client import ( - "io/ioutil" + "io" "log" "net/http" "net/http/httptest" @@ -21,13 +21,13 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) { defer tsTLS.Close() // This silences a TLS handshake error, caused by the dialer just immediately // disconnecting, which we can just ignore. 
- tsTLS.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0) for _, testURL := range []string{ts.URL, tsTLS.URL} { u, err := newParsedURL(testURL) require.NoError(t, err) dialFn, err := makeHTTPDialer(testURL) - require.Nil(t, err) + require.NoError(t, err) addr, err := dialFn(u.Scheme, u.GetHostWithPath()) require.NoError(t, err) @@ -92,7 +92,7 @@ func TestMakeHTTPDialerURL(t *testing.T) { u, err := newParsedURL(remote) require.NoError(t, err) dialFn, err := makeHTTPDialer(remote) - require.Nil(t, err) + require.NoError(t, err) addr, err := dialFn(u.Scheme, u.GetHostWithPath()) require.NoError(t, err) @@ -105,7 +105,7 @@ func TestMakeHTTPDialerURL(t *testing.T) { u, err := newParsedURL(errorURL) require.NoError(t, err) dialFn, err := makeHTTPDialer(errorURL) - require.Nil(t, err) + require.NoError(t, err) addr, err := dialFn(u.Scheme, u.GetHostWithPath()) require.Error(t, err) diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go deleted file mode 100644 index cd4ff06868..0000000000 --- a/rpc/jsonrpc/client/http_uri_client.go +++ /dev/null @@ -1,85 +0,0 @@ -package client - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "strings" - - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -const ( - // URIClientRequestID in a request ID used by URIClient - URIClientRequestID = rpctypes.JSONRPCIntID(-1) -) - -// URIClient is a JSON-RPC client, which sends POST form HTTP requests to the -// remote server. -// -// URIClient is safe for concurrent use by multiple goroutines. -type URIClient struct { - address string - client *http.Client -} - -var _ HTTPClient = (*URIClient)(nil) - -// NewURI returns a new client. -// An error is returned on invalid remote. -// The function panics when remote is nil. 
-func NewURI(remote string) (*URIClient, error) { - parsedURL, err := newParsedURL(remote) - if err != nil { - return nil, err - } - - httpClient, err := DefaultHTTPClient(remote) - if err != nil { - return nil, err - } - - parsedURL.SetDefaultSchemeHTTP() - - uriClient := &URIClient{ - address: parsedURL.GetTrimmedURL(), - client: httpClient, - } - - return uriClient, nil -} - -// Call issues a POST form HTTP request. -func (c *URIClient) Call(ctx context.Context, method string, - params map[string]interface{}, result interface{}) (interface{}, error) { - - values, err := argsToURLValues(params) - if err != nil { - return nil, fmt.Errorf("failed to encode params: %w", err) - } - - req, err := http.NewRequestWithContext( - ctx, - http.MethodPost, - c.address+"/"+method, - strings.NewReader(values.Encode()), - ) - if err != nil { - return nil, fmt.Errorf("new request: %w", err) - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - resp, err := c.client.Do(req) - if err != nil { - return nil, fmt.Errorf("post: %w", err) - } - defer resp.Body.Close() - - responseBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("read response body: %w", err) - } - - return unmarshalResponseBytes(responseBytes, URIClientRequestID, result) -} diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go index 26f24d2555..f53b28802d 100644 --- a/rpc/jsonrpc/client/integration_test.go +++ b/rpc/jsonrpc/client/integration_test.go @@ -8,6 +8,7 @@ package client import ( "bytes" + "context" "errors" "net" "regexp" @@ -15,45 +16,36 @@ import ( "time" "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/log" ) func TestWSClientReconnectWithJitter(t *testing.T) { - n := 8 - maxReconnectAttempts := 3 - // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... 
- maxSleepTime := time.Second * time.Duration(((1< 0 { // ticker with a predefined period @@ -405,10 +389,9 @@ func (c *WSClient) writeRoutine() { c.mtx.Lock() c.sentLastPingAt = time.Now() c.mtx.Unlock() - c.Logger.Debug("sent ping") case <-c.readRoutineQuit: return - case <-c.Quit(): + case <-ctx.Done(): if err := c.conn.WriteMessage( websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), @@ -422,13 +405,9 @@ func (c *WSClient) writeRoutine() { // The client ensures that there is at most one reader to a connection by // executing all reads from this goroutine. -func (c *WSClient) readRoutine() { +func (c *WSClient) readRoutine(ctx context.Context) { defer func() { c.conn.Close() - // err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - // } c.wg.Done() }() @@ -439,7 +418,6 @@ func (c *WSClient) readRoutine() { c.mtx.RUnlock() c.PingPongLatencyTimer.UpdateSince(t) - c.Logger.Debug("got pong") return nil }) @@ -469,11 +447,6 @@ func (c *WSClient) readRoutine() { continue } - if err = validateResponseID(response.ID); err != nil { - c.Logger.Error("error in response ID", "id", response.ID, "err", err) - continue - } - // TODO: events resulting from /subscribe do not work with -> // because they are implemented as responses with the subscribe request's // ID. 
According to the spec, they should be notifications (requests @@ -494,7 +467,8 @@ func (c *WSClient) readRoutine() { c.Logger.Info("got response", "id", response.ID, "result", response.Result) select { - case <-c.Quit(): + case <-ctx.Done(): + return case c.ResponsesCh <- response: } } diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index 208313e794..37bd64b229 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -9,19 +9,28 @@ import ( "testing" "time" + "github.com/fortytw2/leaktest" "github.com/gorilla/websocket" + metrics "github.com/rcrowley/go-metrics" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -var wsCallTimeout = 5 * time.Second +func init() { + // Disable go-metrics metrics in tests, since they start unsupervised + // goroutines that trip the leak tester. Calling Stop on the metric is not + // sufficient, as that does not wait for the goroutine. 
+ metrics.UseNilMetrics = true +} + +const wsCallTimeout = 5 * time.Second -type myHandler struct { +type myTestHandler struct { closeConnAfterRead bool - mtx tmsync.RWMutex + mtx sync.RWMutex + t *testing.T } var upgrader = websocket.Upgrader{ @@ -29,11 +38,10 @@ var upgrader = websocket.Upgrader{ WriteBufferSize: 1024, } -func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *myTestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - panic(err) - } + require.NoError(h.t, err) + defer conn.Close() for { messageType, in, err := conn.ReadMessage() @@ -43,20 +51,21 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req rpctypes.RPCRequest err = json.Unmarshal(in, &req) - if err != nil { - panic(err) - } + require.NoError(h.t, err) + + func() { + h.mtx.RLock() + defer h.mtx.RUnlock() - h.mtx.RLock() - if h.closeConnAfterRead { - if err := conn.Close(); err != nil { - panic(err) + if h.closeConnAfterRead { + require.NoError(h.t, conn.Close()) } - } - h.mtx.RUnlock() + }() res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(rpctypes.RPCResponse{Result: res, ID: req.ID}) + + emptyRespBytes, err := json.Marshal(req.MakeResponse(res)) + require.NoError(h.t, err) if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { return } @@ -64,25 +73,26 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func TestWSClientReconnectsAfterReadFailure(t *testing.T) { - var wg sync.WaitGroup + t.Cleanup(leaktest.Check(t)) // start server - h := &myHandler{} + h := &myTestHandler{t: t} s := httptest.NewServer(h) defer s.Close() - c := startClient(t, "//"+s.Listener.Addr().String()) - defer c.Stop() // nolint:errcheck // ignore for tests + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() - wg.Add(1) - go callWgDoneOnResult(t, c, &wg) + c := startClient(ctx, t, 
"//"+s.Listener.Addr().String()) + + go handleResponses(ctx, t, c) h.mtx.Lock() h.closeConnAfterRead = true h.mtx.Unlock() // results in WS read error, no send retry because write succeeded - call(t, "a", c) + call(ctx, t, "a", c) // expect to reconnect almost immediately time.Sleep(10 * time.Millisecond) @@ -91,23 +101,23 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) { h.mtx.Unlock() // should succeed - call(t, "b", c) - - wg.Wait() + call(ctx, t, "b", c) } func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { - var wg sync.WaitGroup + t.Cleanup(leaktest.Check(t)) // start server - h := &myHandler{} + h := &myTestHandler{t: t} s := httptest.NewServer(h) + defer s.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - c := startClient(t, "//"+s.Listener.Addr().String()) - defer c.Stop() // nolint:errcheck // ignore for tests + c := startClient(ctx, t, "//"+s.Listener.Addr().String()) - wg.Add(2) - go callWgDoneOnResult(t, c, &wg) + go handleResponses(ctx, t, c) // hacky way to abort the connection before write if err := c.conn.Close(); err != nil { @@ -115,30 +125,32 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { } // results in WS write error, the client should resend on reconnect - call(t, "a", c) + call(ctx, t, "a", c) // expect to reconnect almost immediately time.Sleep(10 * time.Millisecond) // should succeed - call(t, "b", c) - - wg.Wait() + call(ctx, t, "b", c) } func TestWSClientReconnectFailure(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + // start server - h := &myHandler{} + h := &myTestHandler{t: t} s := httptest.NewServer(h) - c := startClient(t, "//"+s.Listener.Addr().String()) - defer c.Stop() // nolint:errcheck // ignore for tests + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + c := startClient(ctx, t, "//"+s.Listener.Addr().String()) go func() { for { select { case <-c.ResponsesCh: - case <-c.Quit(): + case <-ctx.Done(): return } } @@ -152,9 
+164,9 @@ func TestWSClientReconnectFailure(t *testing.T) { // results in WS write error // provide timeout to avoid blocking - ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout) + cctx, cancel := context.WithTimeout(ctx, wsCallTimeout) defer cancel() - if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil { + if err := c.Call(cctx, "a", make(map[string]interface{})); err != nil { t.Error(err) } @@ -164,7 +176,7 @@ func TestWSClientReconnectFailure(t *testing.T) { done := make(chan struct{}) go func() { // client should block on this - call(t, "b", c) + call(ctx, t, "b", c) close(done) }() @@ -178,44 +190,55 @@ func TestWSClientReconnectFailure(t *testing.T) { } func TestNotBlockingOnStop(t *testing.T) { - timeout := 2 * time.Second - s := httptest.NewServer(&myHandler{}) - c := startClient(t, "//"+s.Listener.Addr().String()) - c.Call(context.Background(), "a", make(map[string]interface{})) // nolint:errcheck // ignore for tests - // Let the readRoutine get around to blocking - time.Sleep(time.Second) - passCh := make(chan struct{}) + t.Cleanup(leaktest.Check(t)) + + s := httptest.NewServer(&myTestHandler{t: t}) + defer s.Close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + c := startClient(ctx, t, "//"+s.Listener.Addr().String()) + require.NoError(t, c.Call(ctx, "a", make(map[string]interface{}))) + + time.Sleep(200 * time.Millisecond) // give service routines time to start ⚠️ + done := make(chan struct{}) go func() { - // Unless we have a non-blocking write to ResponsesCh from readRoutine - // this blocks forever ont the waitgroup - err := c.Stop() - require.NoError(t, err) - passCh <- struct{}{} + cancel() + if assert.NoError(t, c.Stop()) { + close(done) + } }() select { - case <-passCh: - // Pass - case <-time.After(timeout): - t.Fatalf("WSClient did failed to stop within %v seconds - is one of the read/write routines blocking?", - timeout.Seconds()) + case <-done: + t.Log("Stopped client 
successfully") + case <-time.After(2 * time.Second): + t.Fatal("Timed out waiting for client to stop") } } -func startClient(t *testing.T, addr string) *WSClient { +func startClient(ctx context.Context, t *testing.T, addr string) *WSClient { + t.Helper() + + t.Cleanup(leaktest.Check(t)) + c, err := NewWS(addr, "/websocket") - require.Nil(t, err) - err = c.Start() - require.Nil(t, err) - c.SetLogger(log.TestingLogger()) + require.NoError(t, err) + require.NoError(t, c.Start(ctx)) return c } -func call(t *testing.T, method string, c *WSClient) { - err := c.Call(context.Background(), method, make(map[string]interface{})) - require.NoError(t, err) +func call(ctx context.Context, t *testing.T, method string, c *WSClient) { + t.Helper() + + err := c.Call(ctx, method, make(map[string]interface{})) + if ctx.Err() == nil { + require.NoError(t, err) + } } -func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { +func handleResponses(ctx context.Context, t *testing.T, c *WSClient) { + t.Helper() + for { select { case resp := <-c.ResponsesCh: @@ -224,9 +247,9 @@ func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { return } if resp.Result != nil { - wg.Done() + return } - case <-c.Quit(): + case <-ctx.Done(): return } } diff --git a/rpc/jsonrpc/doc.go b/rpc/jsonrpc/doc.go index 813f055f54..b014fe38dd 100644 --- a/rpc/jsonrpc/doc.go +++ b/rpc/jsonrpc/doc.go @@ -55,7 +55,7 @@ // Define some routes // // var Routes = map[string]*rpcserver.RPCFunc{ -// "status": rpcserver.NewRPCFunc(Status, "arg", false), +// "status": rpcserver.NewRPCFunc(Status, "arg"), // } // // An rpc function: diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 5013590b67..9c34f9fae7 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -6,6 +6,7 @@ import ( crand "crypto/rand" "encoding/json" "fmt" + stdlog "log" mrand "math/rand" "net/http" "os" @@ -13,6 +14,7 @@ import ( "testing" "time" + "github.com/fortytw2/leaktest" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,7 +22,6 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/rpc/jsonrpc/server" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Client and Server should work over tcp or unix sockets @@ -35,10 +36,6 @@ const ( testVal = "acbd" ) -var ( - ctx = context.Background() -) - type ResultEcho struct { Value string `json:"value"` } @@ -57,65 +54,69 @@ type ResultEchoDataBytes struct { // Define some routes var Routes = map[string]*server.RPCFunc{ - "echo": server.NewRPCFunc(EchoResult, "arg", false), + "echo": server.NewRPCFunc(EchoResult, "arg"), "echo_ws": server.NewWSRPCFunc(EchoWSResult, "arg"), - "echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg", false), - "echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg", false), - "echo_int": server.NewRPCFunc(EchoIntResult, "arg", false), + "echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"), + "echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"), + "echo_int": server.NewRPCFunc(EchoIntResult, "arg"), } -func EchoResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { +func EchoResult(ctx context.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { +func EchoWSResult(ctx context.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(ctx *rpctypes.Context, v int) (*ResultEchoInt, error) { +func EchoIntResult(ctx context.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(ctx *rpctypes.Context, v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(ctx context.Context, v []byte) (*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(ctx *rpctypes.Context, v tmbytes.HexBytes) 
(*ResultEchoDataBytes, error) { +func EchoDataBytesResult(ctx context.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } func TestMain(m *testing.M) { - setup() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := setup(ctx); err != nil { + stdlog.Fatal(err.Error()) + } code := m.Run() os.Exit(code) } // launch unix and tcp servers -func setup() { - logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) +func setup(ctx context.Context) error { + logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) cmd := exec.Command("rm", "-f", unixSocket) err := cmd.Start() if err != nil { - panic(err) + return err } if err = cmd.Wait(); err != nil { - panic(err) + return err } tcpLogger := logger.With("socket", "tcp") mux := http.NewServeMux() server.RegisterRPCFuncs(mux, Routes, tcpLogger) - wm := server.NewWebsocketManager(Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) - wm.SetLogger(tcpLogger) + wm := server.NewWebsocketManager(tcpLogger, Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) config := server.DefaultConfig() listener1, err := server.Listen(tcpAddr, config.MaxOpenConnections) if err != nil { - panic(err) + return err } go func() { - if err := server.Serve(listener1, mux, tcpLogger, config); err != nil { + if err := server.Serve(ctx, listener1, mux, tcpLogger, config); err != nil { panic(err) } }() @@ -123,94 +124,94 @@ func setup() { unixLogger := logger.With("socket", "unix") mux2 := http.NewServeMux() server.RegisterRPCFuncs(mux2, Routes, unixLogger) - wm = server.NewWebsocketManager(Routes) - wm.SetLogger(unixLogger) + wm = server.NewWebsocketManager(unixLogger, Routes) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) listener2, err := server.Listen(unixAddr, config.MaxOpenConnections) if err != nil { - panic(err) + return 
err } go func() { - if err := server.Serve(listener2, mux2, unixLogger, config); err != nil { + if err := server.Serve(ctx, listener2, mux2, unixLogger, config); err != nil { panic(err) } }() // wait for servers to start time.Sleep(time.Second * 2) + return nil } -func echoViaHTTP(cl client.Caller, val string) (string, error) { +func echoViaHTTP(ctx context.Context, cl client.Caller, val string) (string, error) { params := map[string]interface{}{ "arg": val, } result := new(ResultEcho) - if _, err := cl.Call(ctx, "echo", params, result); err != nil { + if err := cl.Call(ctx, "echo", params, result); err != nil { return "", err } return result.Value, nil } -func echoIntViaHTTP(cl client.Caller, val int) (int, error) { +func echoIntViaHTTP(ctx context.Context, cl client.Caller, val int) (int, error) { params := map[string]interface{}{ "arg": val, } result := new(ResultEchoInt) - if _, err := cl.Call(ctx, "echo_int", params, result); err != nil { + if err := cl.Call(ctx, "echo_int", params, result); err != nil { return 0, err } return result.Value, nil } -func echoBytesViaHTTP(cl client.Caller, bytes []byte) ([]byte, error) { +func echoBytesViaHTTP(ctx context.Context, cl client.Caller, bytes []byte) ([]byte, error) { params := map[string]interface{}{ "arg": bytes, } result := new(ResultEchoBytes) - if _, err := cl.Call(ctx, "echo_bytes", params, result); err != nil { + if err := cl.Call(ctx, "echo_bytes", params, result); err != nil { return []byte{}, err } return result.Value, nil } -func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.HexBytes, error) { +func echoDataBytesViaHTTP(ctx context.Context, cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.HexBytes, error) { params := map[string]interface{}{ "arg": bytes, } result := new(ResultEchoDataBytes) - if _, err := cl.Call(ctx, "echo_data_bytes", params, result); err != nil { + if err := cl.Call(ctx, "echo_data_bytes", params, result); err != nil { return []byte{}, err } return 
result.Value, nil } -func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { +func testWithHTTPClient(ctx context.Context, t *testing.T, cl client.Caller) { val := testVal - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) + got, err := echoViaHTTP(ctx, cl, val) + require.NoError(t, err) assert.Equal(t, got, val) val2 := randBytes(t) - got2, err := echoBytesViaHTTP(cl, val2) - require.Nil(t, err) + got2, err := echoBytesViaHTTP(ctx, cl, val2) + require.NoError(t, err) assert.Equal(t, got2, val2) val3 := tmbytes.HexBytes(randBytes(t)) - got3, err := echoDataBytesViaHTTP(cl, val3) - require.Nil(t, err) + got3, err := echoDataBytesViaHTTP(ctx, cl, val3) + require.NoError(t, err) assert.Equal(t, got3, val3) val4 := mrand.Intn(10000) - got4, err := echoIntViaHTTP(cl, val4) - require.Nil(t, err) + got4, err := echoIntViaHTTP(ctx, cl, val4) + require.NoError(t, err) assert.Equal(t, got4, val4) } -func echoViaWS(cl *client.WSClient, val string) (string, error) { +func echoViaWS(ctx context.Context, cl *client.WSClient, val string) (string, error) { params := map[string]interface{}{ "arg": val, } - err := cl.Call(context.Background(), "echo", params) + err := cl.Call(ctx, "echo", params) if err != nil { return "", err } @@ -228,11 +229,11 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) { return result.Value, nil } -func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { +func echoBytesViaWS(ctx context.Context, cl *client.WSClient, bytes []byte) ([]byte, error) { params := map[string]interface{}{ "arg": bytes, } - err := cl.Call(context.Background(), "echo_bytes", params) + err := cl.Call(ctx, "echo_bytes", params) if err != nil { return []byte{}, err } @@ -250,71 +251,65 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { return result.Value, nil } -func testWithWSClient(t *testing.T, cl *client.WSClient) { +func testWithWSClient(ctx context.Context, t *testing.T, cl *client.WSClient) { val := testVal 
- got, err := echoViaWS(cl, val) - require.Nil(t, err) + got, err := echoViaWS(ctx, cl, val) + require.NoError(t, err) assert.Equal(t, got, val) val2 := randBytes(t) - got2, err := echoBytesViaWS(cl, val2) - require.Nil(t, err) + got2, err := echoBytesViaWS(ctx, cl, val2) + require.NoError(t, err) assert.Equal(t, got2, val2) } //------------- func TestServersAndClientsBasic(t *testing.T) { + // TODO: reenable the leak detector once the test fixture is + // managed in the context of this test. + // + // t.Cleanup(leaktest.Check(t)) + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + serverAddrs := [...]string{tcpAddr, unixAddr} for _, addr := range serverAddrs { - cl1, err := client.NewURI(addr) - require.Nil(t, err) - fmt.Printf("=== testing server on %s using URI client", addr) - testWithHTTPClient(t, cl1) - - cl2, err := client.New(addr) - require.Nil(t, err) - fmt.Printf("=== testing server on %s using JSONRPC client", addr) - testWithHTTPClient(t, cl2) - - cl3, err := client.NewWS(addr, websocketEndpoint) - require.Nil(t, err) - cl3.SetLogger(log.TestingLogger()) - err = cl3.Start() - require.Nil(t, err) - fmt.Printf("=== testing server on %s using WS client", addr) - testWithWSClient(t, cl3) - err = cl3.Stop() - require.NoError(t, err) + t.Run(addr, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + + logger := log.NewNopLogger() + + cl2, err := client.New(addr) + require.NoError(t, err) + fmt.Printf("=== testing server on %s using JSONRPC client", addr) + testWithHTTPClient(ctx, t, cl2) + + cl3, err := client.NewWS(addr, websocketEndpoint) + require.NoError(t, err) + cl3.Logger = logger + err = cl3.Start(ctx) + require.NoError(t, err) + fmt.Printf("=== testing server on %s using WS client", addr) + testWithWSClient(ctx, t, cl3) + cancel() + }) } } -func TestHexStringArg(t *testing.T) { - cl, err := client.NewURI(tcpAddr) - require.Nil(t, err) - // should NOT be handled as hex - val := "0xabc" - got, 
err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) -} +func TestWSNewWSRPCFunc(t *testing.T) { + t.Cleanup(leaktest.Check(t)) -func TestQuotedStringArg(t *testing.T) { - cl, err := client.NewURI(tcpAddr) - require.Nil(t, err) - // should NOT be unquoted - val := "\"abc\"" - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) -} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -func TestWSNewWSRPCFunc(t *testing.T) { cl, err := client.NewWS(tcpAddr, websocketEndpoint) - require.Nil(t, err) - cl.SetLogger(log.TestingLogger()) - err = cl.Start() - require.Nil(t, err) + require.NoError(t, err) + cl.Logger = log.NewNopLogger() + err = cl.Start(ctx) + require.NoError(t, err) t.Cleanup(func() { if err := cl.Stop(); err != nil { t.Error(err) @@ -325,8 +320,8 @@ func TestWSNewWSRPCFunc(t *testing.T) { params := map[string]interface{}{ "arg": val, } - err = cl.Call(context.Background(), "echo_ws", params) - require.Nil(t, err) + err = cl.Call(ctx, "echo_ws", params) + require.NoError(t, err) msg := <-cl.ResponsesCh if msg.Error != nil { @@ -334,35 +329,7 @@ func TestWSNewWSRPCFunc(t *testing.T) { } result := new(ResultEcho) err = json.Unmarshal(msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) -} - -func TestWSHandlesArrayParams(t *testing.T) { - cl, err := client.NewWS(tcpAddr, websocketEndpoint) - require.Nil(t, err) - cl.SetLogger(log.TestingLogger()) - err = cl.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := cl.Stop(); err != nil { - t.Error(err) - } - }) - - val := testVal - params := []interface{}{val} - err = cl.CallWithArrayParams(context.Background(), "echo_ws", params) - require.Nil(t, err) - - msg := <-cl.ResponsesCh - if msg.Error != nil { - t.Fatalf("%+v", err) - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - require.Nil(t, err) + require.NoError(t, err) got := result.Value assert.Equal(t, got, 
val) } @@ -370,11 +337,16 @@ func TestWSHandlesArrayParams(t *testing.T) { // TestWSClientPingPong checks that a client & server exchange pings // & pongs so connection stays alive. func TestWSClientPingPong(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cl, err := client.NewWS(tcpAddr, websocketEndpoint) - require.Nil(t, err) - cl.SetLogger(log.TestingLogger()) - err = cl.Start() - require.Nil(t, err) + require.NoError(t, err) + cl.Logger = log.NewNopLogger() + err = cl.Start(ctx) + require.NoError(t, err) t.Cleanup(func() { if err := cl.Stop(); err != nil { t.Error(err) @@ -388,6 +360,6 @@ func randBytes(t *testing.T) []byte { n := mrand.Intn(10) + 2 buf := make([]byte, n) _, err := crand.Read(buf) - require.Nil(t, err) + require.NoError(t, err) return bytes.ReplaceAll(buf, []byte("="), []byte{100}) } diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index fbc0cca791..94da974de4 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -2,17 +2,18 @@ package server import ( "bytes" + "context" "encoding/json" "errors" "fmt" - "io/ioutil" + "html/template" + "io" "net/http" "reflect" - "sort" + "strconv" + "strings" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -20,130 +21,75 @@ import ( // jsonrpc calls grab the given method's function info and runs reflect.Call func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - b, err := ioutil.ReadAll(r.Body) + return func(w http.ResponseWriter, hreq *http.Request) { + // For POST requests, reject a non-root URL path. 
This should not happen + // in the standard configuration, since the wrapper checks the path. + if hreq.URL.Path != "/" { + writeRPCResponse(w, logger, rpctypes.RPCRequest{}.MakeErrorf( + rpctypes.CodeInvalidRequest, "invalid path: %q", hreq.URL.Path)) + return + } + + b, err := io.ReadAll(hreq.Body) if err != nil { - res := rpctypes.RPCInvalidRequestError(nil, - fmt.Errorf("error reading request body: %w", err), - ) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } + writeRPCResponse(w, logger, rpctypes.RPCRequest{}.MakeErrorf( + rpctypes.CodeInvalidRequest, "reading request body: %v", err)) return } // if its an empty request (like from a browser), just display a list of // functions if len(b) == 0 { - writeListOfEndpoints(w, r, funcMap) + writeListOfEndpoints(w, hreq, funcMap) return } - // first try to unmarshal the incoming request as an array of RPC requests - var ( - requests []rpctypes.RPCRequest - responses []rpctypes.RPCResponse - ) - if err := json.Unmarshal(b, &requests); err != nil { - // next, try to unmarshal as a single request - var request rpctypes.RPCRequest - if err := json.Unmarshal(b, &request); err != nil { - res := rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - return - } - requests = []rpctypes.RPCRequest{request} + requests, err := parseRequests(b) + if err != nil { + writeRPCResponse(w, logger, rpctypes.RPCRequest{}.MakeErrorf( + rpctypes.CodeParseError, "decoding request: %v", err)) + return } - // Set the default response cache to true unless - // 1. Any RPC request rrror. - // 2. Any RPC request doesn't allow to be cached. - // 3. Any RPC request has the height argument and the value is 0 (the default). 
- var c = true - for _, request := range requests { - request := request - - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == nil { - logger.Debug( - "HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)", - "req", request, - ) + var responses []rpctypes.RPCResponse + for _, req := range requests { + // Ignore notifications, which this service does not support. + if req.IsNotification() { + logger.Debug("Ignoring notification", "req", req) continue } - if len(r.URL.Path) > 1 { - responses = append( - responses, - rpctypes.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), - ) - c = false - continue - } - rpcFunc, ok := funcMap[request.Method] + + rpcFunc, ok := funcMap[req.Method] if !ok || rpcFunc.ws { - responses = append(responses, rpctypes.RPCMethodNotFoundError(request.ID)) - c = false + responses = append(responses, req.MakeErrorf(rpctypes.CodeMethodNotFound, req.Method)) continue } - ctx := &rpctypes.Context{JSONReq: &request, HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) - if err != nil { - responses = append( - responses, - rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), - ) - c = false - continue - } - args = append(args, fnArgs...) 
- } - - if hasDefaultHeight(request, args) { - c = false + req := req + ctx := rpctypes.WithCallInfo(hreq.Context(), &rpctypes.CallInfo{ + RPCRequest: &req, + HTTPRequest: hreq, + }) + args, err := parseParams(ctx, rpcFunc, req.Params) + if err != nil { + responses = append(responses, + req.MakeErrorf(rpctypes.CodeInvalidParams, "converting JSON parameters: %v", err)) + continue } returns := rpcFunc.f.Call(args) - logger.Debug("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) result, err := unreflectResult(returns) - switch e := err.(type) { - // if no error then return a success response - case nil: - responses = append(responses, rpctypes.NewRPCSuccessResponse(request.ID, result)) - - // if this already of type RPC error then forward that error - case *rpctypes.RPCError: - responses = append(responses, rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) - c = false - default: // we need to unwrap the error and parse it accordingly - switch errors.Unwrap(err) { - // check if the error was due to an invald request - case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, - coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: - responses = append(responses, rpctypes.RPCInvalidRequestError(request.ID, err)) - c = false - // lastly default all remaining errors as internal errors - default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - responses = append(responses, rpctypes.RPCInternalError(request.ID, err)) - c = false - } - } - - if c && !rpcFunc.cache { - c = false + if err == nil { + responses = append(responses, req.MakeResponse(result)) + } else { + responses = append(responses, req.MakeError(err)) } } - if len(responses) > 0 { - if wErr := WriteRPCResponseHTTP(w, c, responses...); wErr != nil { - logger.Error("failed to write responses", "err", wErr) - } + if len(responses) == 0 { + return } + writeRPCResponse(w, logger, responses...) 
} } @@ -160,128 +106,168 @@ func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { } } -func mapParamsToArgs( - rpcFunc *RPCFunc, - params map[string]json.RawMessage, - argsOffset int, -) ([]reflect.Value, error) { +// parseRequests parses a JSON-RPC request or request batch from data. +func parseRequests(data []byte) ([]rpctypes.RPCRequest, error) { + var reqs []rpctypes.RPCRequest + var err error - values := make([]reflect.Value, len(rpcFunc.argNames)) - for i, argName := range rpcFunc.argNames { - argType := rpcFunc.args[i+argsOffset] + isArray := bytes.HasPrefix(bytes.TrimSpace(data), []byte("[")) + if isArray { + err = json.Unmarshal(data, &reqs) + } else { + reqs = append(reqs, rpctypes.RPCRequest{}) + err = json.Unmarshal(data, &reqs[0]) + } + if err != nil { + return nil, err + } + return reqs, nil +} - if p, ok := params[argName]; ok && p != nil && len(p) > 0 { - val := reflect.New(argType) - err := tmjson.Unmarshal(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() - } else { // use default for that type - values[i] = reflect.Zero(argType) - } +// parseParams parses the JSON parameters of rpcReq into the arguments of fn, +// returning the corresponding argument values or an error. 
+func parseParams(ctx context.Context, fn *RPCFunc, paramData []byte) ([]reflect.Value, error) { + params, err := parseJSONParams(fn, paramData) + if err != nil { + return nil, err } - return values, nil -} + args := make([]reflect.Value, 1+len(params)) + args[0] = reflect.ValueOf(ctx) + for i, param := range params { + ptype := fn.args[i+1] + if len(param) == 0 { + args[i+1] = reflect.Zero(ptype) + continue + } -func arrayParamsToArgs( - rpcFunc *RPCFunc, - params []json.RawMessage, - argsOffset int, -) ([]reflect.Value, error) { + var pval reflect.Value + isPtr := ptype.Kind() == reflect.Ptr + if isPtr { + pval = reflect.New(ptype.Elem()) + } else { + pval = reflect.New(ptype) + } + baseType := pval.Type().Elem() - if len(rpcFunc.argNames) != len(params) { - return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)", - len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) - } + if isIntType(baseType) && isStringValue(param) { + var z int64String + if err := json.Unmarshal(param, &z); err != nil { + return nil, fmt.Errorf("decoding string %q: %w", fn.argNames[i], err) + } + pval.Elem().Set(reflect.ValueOf(z).Convert(baseType)) + } else if err := json.Unmarshal(param, pval.Interface()); err != nil { + return nil, fmt.Errorf("decoding %q: %w", fn.argNames[i], err) + } - values := make([]reflect.Value, len(params)) - for i, p := range params { - argType := rpcFunc.args[i+argsOffset] - val := reflect.New(argType) - err := tmjson.Unmarshal(p, val.Interface()) - if err != nil { - return nil, err + if isPtr { + args[i+1] = pval + } else { + args[i+1] = pval.Elem() } - values[i] = val.Elem() } - return values, nil + return args, nil } -// raw is unparsed json (from json.RawMessage) encoding either a map or an -// array. 
-// -// Example: -// rpcFunc.args = [rpctypes.Context string] -// rpcFunc.argNames = ["arg"] -func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { - const argsOffset = 1 - - // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? - // First, try to get the map. - var m map[string]json.RawMessage - err := json.Unmarshal(raw, &m) - if err == nil { - return mapParamsToArgs(rpcFunc, m, argsOffset) - } +// parseJSONParams parses data and returns a slice of JSON values matching the +// positional parameters of fn. It reports an error if data is not "null" and +// does not encode an object or an array, or if the number of array parameters +// does not match the argument list of fn (excluding the context). +func parseJSONParams(fn *RPCFunc, data []byte) ([]json.RawMessage, error) { + base := bytes.TrimSpace(data) + if bytes.HasPrefix(base, []byte("{")) { + var m map[string]json.RawMessage + if err := json.Unmarshal(base, &m); err != nil { + return nil, fmt.Errorf("decoding parameter object: %w", err) + } + out := make([]json.RawMessage, len(fn.argNames)) + for i, name := range fn.argNames { + if p, ok := m[name]; ok { + out[i] = p + } + } + return out, nil - // Otherwise, try an array. - var a []json.RawMessage - err = json.Unmarshal(raw, &a) - if err == nil { - return arrayParamsToArgs(rpcFunc, a, argsOffset) + } else if bytes.HasPrefix(base, []byte("[")) { + var m []json.RawMessage + if err := json.Unmarshal(base, &m); err != nil { + return nil, fmt.Errorf("decoding parameter array: %w", err) + } + if len(m) != len(fn.argNames) { + return nil, fmt.Errorf("got %d parameters, want %d", len(m), len(fn.argNames)) + } + return m, nil + + } else if bytes.Equal(base, []byte("null")) { + return make([]json.RawMessage, len(fn.argNames)), nil } - // Otherwise, bad format, we cannot parse - return nil, fmt.Errorf("unknown type for JSON params: %v. 
Expected map or array", err) + return nil, errors.New("parameters must be an object or an array") +} + +// isStringValue reports whether data is a JSON string value. +func isStringValue(data json.RawMessage) bool { + return len(data) != 0 && data[0] == '"' +} + +type int64String int64 + +func (z *int64String) UnmarshalText(data []byte) error { + v, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return err + } + *z = int64String(v) + return nil } // writes a list of available rpc endpoints as an html page func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[string]*RPCFunc) { - noArgNames := []string{} - argNames := []string{} - for name, funcData := range funcMap { - if len(funcData.args) == 0 { - noArgNames = append(noArgNames, name) + hasArgs := make(map[string]string) + noArgs := make(map[string]string) + for name, rf := range funcMap { + base := fmt.Sprintf("//%s/%s", r.Host, name) + // N.B. Check argNames, not args, since the type list includes the type + // of the leading context argument. + if len(rf.argNames) == 0 { + noArgs[name] = base } else { - argNames = append(argNames, name) - } - } - sort.Strings(noArgNames) - sort.Strings(argNames) - buf := new(bytes.Buffer) - buf.WriteString("") - buf.WriteString("
Available endpoints:
") - - for _, name := range noArgNames { - link := fmt.Sprintf("//%s/%s", r.Host, name) - buf.WriteString(fmt.Sprintf("%s
", link, link)) - } - - buf.WriteString("
Endpoints that require arguments:
") - for _, name := range argNames { - link := fmt.Sprintf("//%s/%s?", r.Host, name) - funcData := funcMap[name] - for i, argName := range funcData.argNames { - link += argName + "=_" - if i < len(funcData.argNames)-1 { - link += "&" + query := append([]string(nil), rf.argNames...) + for i, arg := range query { + query[i] = arg + "=_" } + hasArgs[name] = base + "?" + strings.Join(query, "&") } - buf.WriteString(fmt.Sprintf("%s
", link, link)) } - buf.WriteString("") w.Header().Set("Content-Type", "text/html") - w.WriteHeader(200) - w.Write(buf.Bytes()) // nolint: errcheck + _ = listOfEndpoints.Execute(w, map[string]map[string]string{ + "NoArgs": noArgs, + "HasArgs": hasArgs, + }) } -func hasDefaultHeight(r rpctypes.RPCRequest, h []reflect.Value) bool { - switch r.Method { - case "block", "block_results", "commit", "consensus_params", "validators": - return len(h) < 2 || h[1].IsZero() - default: - return false - } -} +var listOfEndpoints = template.Must(template.New("list").Parse(` +List of RPC Endpoints + + +

Available RPC endpoints:

+ +{{if .NoArgs}} +
+

Endpoints with no arguments:

+ +
    +{{range $link := .NoArgs}}
  • {{$link}}
  • +{{end -}} +
{{end}} + +{{if .HasArgs}} +
+

Endpoints that require arguments:

+ +
    +{{range $link := .HasArgs}}
  • {{$link}}
  • +{{end -}} +
{{end}} + +`)) diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 64e7597fd1..1f5d2c320d 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -1,8 +1,9 @@ package server import ( + "context" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -17,8 +18,8 @@ import ( func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), - "block": NewRPCFunc(func(ctx *rpctypes.Context, h int) (string, error) { return "block", nil }, "height", true), + "c": NewRPCFunc(func(ctx context.Context, s string, i int) (string, error) { return "foo", nil }, "s", "i"), + "block": NewRPCFunc(func(ctx context.Context, h int) (string, error) { return "block", nil }, "height"), } mux := http.NewServeMux() logger := log.NewNopLogger() @@ -37,24 +38,24 @@ func TestRPCParams(t *testing.T) { tests := []struct { payload string wantErr string - expectedID interface{} + expectedID string }{ // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", `"0"`}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", `"0"`}, // id not captured in JSON parsing failures - {`{"method": "c", "id": "0", "params": a}`, "invalid character", nil}, - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": a}`, "invalid character", ""}, + {`{"method": "c", "id": "0", 
"params": ["a"]}`, "got 1", `"0"`}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid syntax", `"0"`}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", `"0"`}, // no ID - notification // {`{"jsonrpc": "2.0", "method": "c", "params": ["a", "10"]}`, false, nil}, // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": {}}`, "", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", `"0"`}, + {`{"method": "c", "id": "0", "params": {}}`, "", `"0"`}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", `"0"`}, } for i, tt := range tests { @@ -62,19 +63,17 @@ func TestRPCParams(t *testing.T) { rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() - defer res.Body.Close() + // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("#%d: err reading body: %v", i, err) - continue - } + blob, err := io.ReadAll(res.Body) + require.NoError(t, err, "#%d: reading body", i) + require.NoError(t, res.Body.Close()) recv := new(rpctypes.RPCResponse) assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) + assert.Equal(t, tt.expectedID, recv.ID(), "#%d: expected ID not matched in RPCResponse", i) if tt.wantErr == "" { assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { @@ -90,19 +89,19 @@ func TestJSONRPCID(t *testing.T) { tests := []struct { payload string wantErr bool - expectedID interface{} + expectedID 
string }{ // good id - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("abc")}, - {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(0)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(-1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, `"0"`}, + {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, `"abc"`}, + {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, `0`}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, `1`}, + {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, `-1`}, // bad id - {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, - {`{"jsonrpc": "2.0", "method": "c", "id": [], "params": ["a", "10"]}`, true, nil}, + {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, ""}, // object + {`{"jsonrpc": "2.0", "method": "c", "id": [], "params": ["a", "10"]}`, true, ""}, // array + {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, true, ""}, // fractional } for i, tt := range tests { @@ -112,7 +111,7 @@ func TestJSONRPCID(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -121,10 +120,10 @@ func TestJSONRPCID(t 
*testing.T) { recv := new(rpctypes.RPCResponse) err = json.Unmarshal(blob, recv) - assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + assert.NoError(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) if !tt.wantErr { assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) + assert.Equal(t, tt.expectedID, recv.ID(), "#%d: expected ID not matched in RPCResponse", i) assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) @@ -142,9 +141,9 @@ func TestRPCNotification(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") } @@ -178,7 +177,7 @@ func TestRPCNotificationInBatch(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -237,11 +236,11 @@ func TestRPCResponseCache(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - require.Equal(t, "max-age=31536000", res.Header.Get("Cache-control")) + require.Equal(t, "", res.Header.Get("Cache-control")) - _, err := ioutil.ReadAll(res.Body) + _, err := io.ReadAll(res.Body) res.Body.Close() - 
require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") // send a request with default height. body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`) @@ -254,7 +253,7 @@ func TestRPCResponseCache(t *testing.T) { require.True(t, statusOK(res.StatusCode), "should always return 2XX") require.Equal(t, "", res.Header.Get("Cache-control")) - _, err = ioutil.ReadAll(res.Body) + _, err = io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 49e1e510ea..32917b8cb3 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -2,13 +2,12 @@ package server import ( - "bufio" + "context" "encoding/json" "errors" "fmt" "net" "net/http" - "os" "runtime/debug" "strings" "time" @@ -46,29 +45,46 @@ func DefaultConfig() *Config { } // Serve creates a http.Server and calls Serve with the given listener. It -// wraps handler with RecoverAndLogHandler and a handler, which limits the max -// body size to config.MaxBodyBytes. -// -// NOTE: This function blocks - you may want to call it in a go-routine. -func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { +// wraps handler to recover panics and limit the request body size. 
+func Serve( + ctx context.Context, + listener net.Listener, + handler http.Handler, + logger log.Logger, + config *Config, +) error { logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr())) + h := recoverAndLogHandler(MaxBytesHandler(handler, config.MaxBodyBytes), logger) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + Handler: h, ReadTimeout: config.ReadTimeout, WriteTimeout: config.WriteTimeout, MaxHeaderBytes: config.MaxHeaderBytes, } - err := s.Serve(listener) - logger.Info("RPC HTTP server stopped", "err", err) - return err + sig := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + sctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + _ = s.Shutdown(sctx) + case <-sig: + } + }() + + if err := s.Serve(listener); err != nil { + logger.Info("RPC HTTP server stopped", "err", err) + close(sig) + return err + } + return nil } // Serve creates a http.Server and calls ServeTLS with the given listener, -// certFile and keyFile. It wraps handler with RecoverAndLogHandler and a -// handler, which limits the max body size to config.MaxBodyBytes. -// -// NOTE: This function blocks - you may want to call it in a go-routine. +// certFile and keyFile. It wraps handler to recover panics and limit the +// request body size. 
func ServeTLS( + ctx context.Context, listener net.Listener, handler http.Handler, certFile, keyFile string, @@ -77,186 +93,182 @@ func ServeTLS( ) error { logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listener.Addr(), certFile, keyFile)) + h := recoverAndLogHandler(MaxBytesHandler(handler, config.MaxBodyBytes), logger) s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + Handler: h, ReadTimeout: config.ReadTimeout, WriteTimeout: config.WriteTimeout, MaxHeaderBytes: config.MaxHeaderBytes, } - err := s.ServeTLS(listener, certFile, keyFile) + sig := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + sctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + _ = s.Shutdown(sctx) + case <-sig: + } + }() - logger.Error("RPC HTTPS server stopped", "err", err) - return err + if err := s.ServeTLS(listener, certFile, keyFile); err != nil { + logger.Error("RPC HTTPS server stopped", "err", err) + close(sig) + return err + } + return nil } -// WriteRPCResponseHTTPError marshals res as JSON (with indent) and writes it -// to w. -// -// Maps JSON RPC error codes to HTTP Status codes as follows: -// -// HTTP Status code message -// 500 -32700 Parse error. -// 400 -32600 Invalid Request. -// 404 -32601 Method not found. -// 500 -32602 Invalid params. -// 500 -32603 Internal error. -// 500 -32099..-32000 Server error. +// writeInternalError writes an internal server error (500) to w with the text +// of err in the body. This is a fallback used when a handler is unable to +// write the expected response. +func writeInternalError(w http.ResponseWriter, err error) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintln(w, err.Error()) +} + +// writeHTTPResponse writes a JSON-RPC response to w. 
If rsp encodes an error, +// the response body is its error object; otherwise its responses is the result. // -// source: https://www.jsonrpc.org/historical/json-rpc-over-http.html -func WriteRPCResponseHTTPError( - w http.ResponseWriter, - res rpctypes.RPCResponse, -) error { - if res.Error == nil { - panic("tried to write http error response without RPC error") +// Unless there is an error encoding the response, the status is 200 OK. +func writeHTTPResponse(w http.ResponseWriter, log log.Logger, rsp rpctypes.RPCResponse) { + var body []byte + var err error + if rsp.Error != nil { + body, err = json.Marshal(rsp.Error) + } else { + body = rsp.Result } - - jsonBytes, err := json.MarshalIndent(res, "", " ") if err != nil { - return fmt.Errorf("json marshal: %w", err) - } - - var httpCode int - switch res.Error.Code { - case -32600: - httpCode = http.StatusBadRequest - case -32601: - httpCode = http.StatusNotFound - default: - httpCode = http.StatusInternalServerError + log.Error("Error encoding RPC response: %w", err) + writeInternalError(w, err) + return } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(httpCode) - _, err = w.Write(jsonBytes) - return err + w.WriteHeader(http.StatusOK) + _, _ = w.Write(body) } -// WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. -// If the rpc response can be cached, add cache-control to the response header. -func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...rpctypes.RPCResponse) error { - var v interface{} - if len(res) == 1 { - v = res[0] +// writeRPCResponse writes one or more JSON-RPC responses to w. A single +// response is encoded as an object, otherwise the response is sent as a batch +// (array) of response objects. +// +// Unless there is an error encoding the responses, the status is 200 OK. 
+func writeRPCResponse(w http.ResponseWriter, log log.Logger, rsps ...rpctypes.RPCResponse) { + var body []byte + var err error + if len(rsps) == 1 { + body, err = json.Marshal(rsps[0]) } else { - v = res + body, err = json.Marshal(rsps) } - - jsonBytes, err := json.MarshalIndent(v, "", " ") if err != nil { - return fmt.Errorf("json marshal: %w", err) + log.Error("Error encoding RPC response: %w", err) + writeInternalError(w, err) + return } w.Header().Set("Content-Type", "application/json") - if c { - w.Header().Set("Cache-Control", "max-age=31536000") // expired after one year - } - w.WriteHeader(200) - _, err = w.Write(jsonBytes) - return err + w.WriteHeader(http.StatusOK) + _, _ = w.Write(body) } //----------------------------------------------------------------------------- -// RecoverAndLogHandler wraps an HTTP handler, adding error logging. -// If the inner function panics, the outer function recovers, logs, sends an -// HTTP 500 error response. -func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { +// recoverAndLogHandler wraps an HTTP handler, adding error logging. If the +// inner handler panics, the wrapper recovers, logs, sends an HTTP 500 error +// response to the client. +func recoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Wrap the ResponseWriter to remember the status - rww := &responseWriterWrapper{-1, w} - begin := time.Now() - - rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) + // Capture the HTTP status written by the handler. + var httpStatus int + rww := newStatusWriter(w, &httpStatus) + // Recover panics from inside handler and try to send the client + // 500 Internal server error. If the handler panicked after already + // sending a (partial) response, this is a no-op. defer func() { - // Handle any panics in the panic handler below. 
Does not use the logger, since we want - // to avoid any further panics. However, we try to return a 500, since it otherwise - // defaults to 200 and there is no other way to terminate the connection. If that - // should panic for whatever reason then the Go HTTP server will handle it and - // terminate the connection - panicing is the de-facto and only way to get the Go HTTP - // server to terminate the request and close the connection/stream: - // https://github.com/golang/go/issues/17790#issuecomment-258481416 - if e := recover(); e != nil { - fmt.Fprintf(os.Stderr, "Panic during RPC panic recovery: %v\n%v\n", e, string(debug.Stack())) - w.WriteHeader(500) + if v := recover(); v != nil { + var err error + switch e := v.(type) { + case error: + err = e + case string: + err = errors.New(e) + case fmt.Stringer: + err = errors.New(e.String()) + default: + err = fmt.Errorf("panic with value %v", v) + } + + logger.Error("Panic in RPC HTTP handler", + "err", err, "stack", string(debug.Stack())) + writeInternalError(rww, err) } }() + // Log timing and response information from the handler. + begin := time.Now() defer func() { - // Send a 500 error if a panic happens during a handler. - // Without this, Chrome & Firefox were retrying aborted ajax requests, - // at least to my localhost. - if e := recover(); e != nil { - - // If RPCResponse - if res, ok := e.(rpctypes.RPCResponse); ok { - if wErr := WriteRPCResponseHTTP(rww, false, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - } else { - // Panics can contain anything, attempt to normalize it as an error. 
- var err error - switch e := e.(type) { - case error: - err = e - case string: - err = errors.New(e) - case fmt.Stringer: - err = errors.New(e.String()) - default: - } - - logger.Error("panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) - - res := rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err) - if wErr := WriteRPCResponseHTTPError(rww, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - } - } - - // Finally, log. - durationMS := time.Since(begin).Nanoseconds() / 1000000 - if rww.Status == -1 { - rww.Status = 200 - } + elapsed := time.Since(begin) logger.Debug("served RPC HTTP response", "method", r.Method, "url", r.URL, - "status", rww.Status, - "duration", durationMS, + "status", httpStatus, + "duration-sec", elapsed.Seconds(), "remoteAddr", r.RemoteAddr, ) }() + rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) handler.ServeHTTP(rww, r) }) } -// Remember the status for logging -type responseWriterWrapper struct { - Status int - http.ResponseWriter +// MaxBytesHandler wraps h in a handler that limits the size of the request +// body to at most maxBytes. If maxBytes <= 0, the request body is not limited. 
+func MaxBytesHandler(h http.Handler, maxBytes int64) http.Handler { + if maxBytes <= 0 { + return h + } + return maxBytesHandler{handler: h, maxBytes: maxBytes} } -func (w *responseWriterWrapper) WriteHeader(status int) { - w.Status = status - w.ResponseWriter.WriteHeader(status) +type maxBytesHandler struct { + handler http.Handler + maxBytes int64 } -// implements http.Hijacker -func (w *responseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return w.ResponseWriter.(http.Hijacker).Hijack() +func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.Body = http.MaxBytesReader(w, req.Body, h.maxBytes) + h.handler.ServeHTTP(w, req) } -type maxBytesHandler struct { - h http.Handler - n int64 +// newStatusWriter wraps an http.ResponseWriter to capture the HTTP status code +// in *code. +func newStatusWriter(w http.ResponseWriter, code *int) statusWriter { + return statusWriter{ + ResponseWriter: w, + Hijacker: w.(http.Hijacker), + code: code, + } } -func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.Body = http.MaxBytesReader(w, r.Body, h.n) - h.h.ServeHTTP(w, r) +type statusWriter struct { + http.ResponseWriter + http.Hijacker // to support websocket upgrade + + code *int +} + +// WriteHeader implements part of http.ResponseWriter. It delegates to the +// wrapped writer, and as a side effect captures the written code. +// +// Note that if a request does not explicitly call WriteHeader, the code will +// not be updated. +func (w statusWriter) WriteHeader(code int) { + *w.code = code + w.ResponseWriter.WriteHeader(code) } // Listen starts a new net.Listener on the given address. 
diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 39e7135655..838a2ef6ca 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -1,10 +1,10 @@ package server import ( + "context" "crypto/tls" - "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,6 +28,13 @@ type sampleResult struct { func TestMaxOpenConnections(t *testing.T) { const max = 5 // max simultaneous connections + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + // Start the server. var open int32 mux := http.NewServeMux() @@ -42,7 +50,8 @@ func TestMaxOpenConnections(t *testing.T) { l, err := Listen("tcp://127.0.0.1:0", max) require.NoError(t, err) defer l.Close() - go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests + + go Serve(ctx, l, mux, logger, config) //nolint:errcheck // ignore for tests // Make N GET calls to the server. 
attempts := max * 2 @@ -71,6 +80,8 @@ func TestMaxOpenConnections(t *testing.T) { } func TestServeTLS(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + ln, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) defer ln.Close() @@ -80,10 +91,17 @@ func TestServeTLS(t *testing.T) { fmt.Fprint(w, "some body") }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + chErr := make(chan error, 1) go func() { - // FIXME This goroutine leaks - chErr <- ServeTLS(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()) + select { + case chErr <- ServeTLS(ctx, ln, mux, "test.crt", "test.key", logger, DefaultConfig()): + case <-ctx.Done(): + } }() select { @@ -101,82 +119,54 @@ func TestServeTLS(t *testing.T) { defer res.Body.Close() assert.Equal(t, http.StatusOK, res.StatusCode) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) require.NoError(t, err) assert.Equal(t, []byte("some body"), body) } -func TestWriteRPCResponseHTTP(t *testing.T) { - id := rpctypes.JSONRPCIntID(-1) +func TestWriteRPCResponse(t *testing.T) { + req := rpctypes.NewRequest(-1) // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, true, rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"})) - require.NoError(t, err) + logger := log.NewNopLogger() + writeRPCResponse(w, logger, req.MakeResponse(&sampleResult{"hello"})) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() + body, err := io.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, "max-age=31536000", resp.Header.Get("Cache-control")) - assert.Equal(t, `{ - "jsonrpc": "2.0", - "id": -1, - "result": { - "value": "hello" - } -}`, string(body)) + assert.Equal(t, "", resp.Header.Get("Cache-control")) + assert.Equal(t, 
`{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}}`, string(body)) // multiple arguments w = httptest.NewRecorder() - err = WriteRPCResponseHTTP(w, - false, - rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"}), - rpctypes.NewRPCSuccessResponse(id, &sampleResult{"world"})) - require.NoError(t, err) + writeRPCResponse(w, logger, + req.MakeResponse(&sampleResult{"hello"}), + req.MakeResponse(&sampleResult{"world"}), + ) resp = w.Result() - body, err = ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() + body, err = io.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, `[ - { - "jsonrpc": "2.0", - "id": -1, - "result": { - "value": "hello" - } - }, - { - "jsonrpc": "2.0", - "id": -1, - "result": { - "value": "world" - } - } -]`, string(body)) + assert.Equal(t, `[{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}},`+ + `{"jsonrpc":"2.0","id":-1,"result":{"value":"world"}}]`, string(body)) } -func TestWriteRPCResponseHTTPError(t *testing.T) { +func TestWriteHTTPResponse(t *testing.T) { w := httptest.NewRecorder() - err := WriteRPCResponseHTTPError(w, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), errors.New("foo"))) - require.NoError(t, err) + logger := log.NewNopLogger() + req := rpctypes.NewRequest(-1) + writeHTTPResponse(w, logger, req.MakeErrorf(rpctypes.CodeInternalError, "foo")) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() + body, err := io.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) require.NoError(t, err) - assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.Equal(t, http.StatusOK, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, `{ - "jsonrpc": "2.0", - "id": -1, - "error": { - "code": -32603, - "message": "Internal error", - "data": "foo" - } -}`, string(body)) 
+ assert.Equal(t, `{"code":-32603,"message":"Internal error","data":"foo"}`, string(body)) } diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 07b3616b44..67491adf55 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -1,234 +1,178 @@ package server import ( + "context" "encoding/hex" - "errors" + "encoding/json" "fmt" "net/http" "reflect" - "regexp" + "strconv" "strings" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -// HTTP + URI handler - -var reInt = regexp.MustCompile(`^-?[0-9]+$`) +// uriReqID is a placeholder ID used for GET requests, which do not receive a +// JSON-RPC request ID from the caller. +const uriReqID = -1 // convert from a function name to the http handler func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { - // Always return -1 as there's no ID here. 
- dummyID := rpctypes.JSONRPCIntID(-1) // URIClientRequestID - - // Exception for websocket endpoints - if rpcFunc.ws { - return func(w http.ResponseWriter, r *http.Request) { - res := rpctypes.RPCMethodNotFoundError(dummyID) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - } - } - - // All other endpoints - return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) - - ctx := &rpctypes.Context{HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - - fnArgs, err := httpParamsToArgs(rpcFunc, r) + return func(w http.ResponseWriter, req *http.Request) { + ctx := rpctypes.WithCallInfo(req.Context(), &rpctypes.CallInfo{ + HTTPRequest: req, + }) + args, err := parseURLParams(ctx, rpcFunc, req) if err != nil { - res := rpctypes.RPCInvalidParamsError(dummyID, - fmt.Errorf("error converting http params to arguments: %w", err), - ) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintln(w, err.Error()) return } - args = append(args, fnArgs...) - - returns := rpcFunc.f.Call(args) - - logger.Debug("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) - result, err := unreflectResult(returns) - switch e := err.(type) { - // if no error then return a success response - case nil: - res := rpctypes.NewRPCSuccessResponse(dummyID, result) - if wErr := WriteRPCResponseHTTP(w, rpcFunc.cache, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - - // if this already of type RPC error then forward that error. 
- case *rpctypes.RPCError: - res := rpctypes.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - - default: // we need to unwrap the error and parse it accordingly - var res rpctypes.RPCResponse - - switch errors.Unwrap(err) { - case coretypes.ErrZeroOrNegativeHeight, - coretypes.ErrZeroOrNegativePerPage, - coretypes.ErrPageOutOfRange, - coretypes.ErrInvalidRequest: - res = rpctypes.RPCInvalidRequestError(dummyID, err) - default: // ctypes.ErrHeightNotAvailable, ctypes.ErrHeightExceedsChainHead: - res = rpctypes.RPCInternalError(dummyID, err) - } - - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } + jreq := rpctypes.NewRequest(uriReqID) + outs := rpcFunc.f.Call(args) + + logger.Debug("HTTPRestRPC", "method", req.URL.Path, "args", args, "returns", outs) + result, err := unreflectResult(outs) + if err == nil { + writeHTTPResponse(w, logger, jreq.MakeResponse(result)) + } else { + writeHTTPResponse(w, logger, jreq.MakeError(err)) } - } } -// Covert an http query to a list of properly typed values. -// To be properly decoded the arg must be a concrete type from tendermint (if its an interface). 
-func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { - // skip types.Context - const argsOffset = 1 - - values := make([]reflect.Value, len(rpcFunc.argNames)) - - for i, name := range rpcFunc.argNames { - argType := rpcFunc.args[i+argsOffset] - - values[i] = reflect.Zero(argType) // set default for that type +func parseURLParams(ctx context.Context, rf *RPCFunc, req *http.Request) ([]reflect.Value, error) { + if err := req.ParseForm(); err != nil { + return nil, fmt.Errorf("invalid HTTP request: %w", err) + } + getArg := func(name string) (string, bool) { + if req.Form.Has(name) { + return req.Form.Get(name), true + } + return "", false + } - arg := getParam(r, name) - // log.Notice("param to arg", "argType", argType, "name", name, "arg", arg) + vals := make([]reflect.Value, len(rf.argNames)+1) + vals[0] = reflect.ValueOf(ctx) + for i, name := range rf.argNames { + atype := rf.args[i+1] - if arg == "" { + text, ok := getArg(name) + if !ok { + vals[i+1] = reflect.Zero(atype) continue } - v, ok, err := nonJSONStringToArg(argType, arg) + val, err := parseArgValue(atype, text) if err != nil { - return nil, err + return nil, fmt.Errorf("decoding parameter %q: %w", name, err) } - if ok { - values[i] = v - continue + vals[i+1] = val + } + return vals, nil +} + +func parseArgValue(atype reflect.Type, text string) (reflect.Value, error) { + // Regardless whether the argument is a pointer type, allocate a pointer so + // we can set the computed value. + var out reflect.Value + isPtr := atype.Kind() == reflect.Ptr + if isPtr { + out = reflect.New(atype.Elem()) + } else { + out = reflect.New(atype) + } + + baseType := out.Type().Elem() + if isIntType(baseType) { + // Integral type: Require a base-10 digit string. For compatibility with + // existing use allow quotation marks. 
+ v, err := decodeInteger(text) + if err != nil { + return reflect.Value{}, fmt.Errorf("invalid integer: %w", err) } + out.Elem().Set(reflect.ValueOf(v).Convert(baseType)) + } else if isStringOrBytes(baseType) { + // String or byte slice: Check for quotes, hex encoding. + dec, err := decodeString(text) + if err != nil { + return reflect.Value{}, err + } + out.Elem().Set(reflect.ValueOf(dec).Convert(baseType)) - values[i], err = jsonStringToArg(argType, arg) + } else if baseType.Kind() == reflect.Bool { + b, err := strconv.ParseBool(text) if err != nil { - return nil, err + return reflect.Value{}, fmt.Errorf("invalid boolean: %w", err) } - } + out.Elem().Set(reflect.ValueOf(b)) - return values, nil -} + } else { + // We don't know how to represent other types. + return reflect.Value{}, fmt.Errorf("unsupported argument type %v", baseType) + } -func jsonStringToArg(rt reflect.Type, arg string) (reflect.Value, error) { - rv := reflect.New(rt) - err := tmjson.Unmarshal([]byte(arg), rv.Interface()) - if err != nil { - return rv, err + // If the argument wants a pointer, return the value as-is, otherwise + // indirect the pointer back off. + if isPtr { + return out, nil } - rv = rv.Elem() - return rv, nil + return out.Elem(), nil } -func nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { - if rt.Kind() == reflect.Ptr { - rv1, ok, err := nonJSONStringToArg(rt.Elem(), arg) - switch { - case err != nil: - return reflect.Value{}, false, err - case ok: - rv := reflect.New(rt.Elem()) - rv.Elem().Set(rv1) - return rv, true, nil - default: - return reflect.Value{}, false, nil - } - } else { - return _nonJSONStringToArg(rt, arg) +var uint64Type = reflect.TypeOf(uint64(0)) + +// isIntType reports whether atype is an integer-shaped type. +func isIntType(atype reflect.Type) bool { + switch atype.Kind() { + case reflect.Float32, reflect.Float64: + return false + default: + return atype.ConvertibleTo(uint64Type) } } -// NOTE: rt.Kind() isn't a pointer. 
-func _nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { - isIntString := reInt.Match([]byte(arg)) - isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) - isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") - - var expectingString, expectingByteSlice, expectingInt bool - switch rt.Kind() { - case reflect.Int, - reflect.Uint, - reflect.Int8, - reflect.Uint8, - reflect.Int16, - reflect.Uint16, - reflect.Int32, - reflect.Uint32, - reflect.Int64, - reflect.Uint64: - expectingInt = true +// isStringOrBytes reports whether atype is a string or []byte. +func isStringOrBytes(atype reflect.Type) bool { + switch atype.Kind() { case reflect.String: - expectingString = true + return true case reflect.Slice: - expectingByteSlice = rt.Elem().Kind() == reflect.Uint8 + return atype.Elem().Kind() == reflect.Uint8 + default: + return false } +} - if isIntString && expectingInt { - qarg := `"` + arg + `"` - rv, err := jsonStringToArg(rt, qarg) - if err != nil { - return rv, false, err - } - - return rv, true, nil - } - - if isHexString { - if !expectingString && !expectingByteSlice { - err := fmt.Errorf("got a hex string arg, but expected '%s'", - rt.Kind().String()) - return reflect.ValueOf(nil), false, err - } - - var value []byte - value, err := hex.DecodeString(arg[2:]) - if err != nil { - return reflect.ValueOf(nil), false, err - } - if rt.Kind() == reflect.String { - return reflect.ValueOf(string(value)), true, nil - } - return reflect.ValueOf(value), true, nil - } +// isQuotedString reports whether s is enclosed in double quotes. 
+func isQuotedString(s string) bool { + return len(s) >= 2 && strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) +} - if isQuotedString && expectingByteSlice { - v := reflect.New(reflect.TypeOf("")) - err := tmjson.Unmarshal([]byte(arg), v.Interface()) - if err != nil { - return reflect.ValueOf(nil), false, err - } - v = v.Elem() - return reflect.ValueOf([]byte(v.String())), true, nil +// decodeInteger decodes s into an int64. If s is "double quoted" the quotes +// are removed; otherwise s must be a base-10 digit string. +func decodeInteger(s string) (int64, error) { + if isQuotedString(s) { + s = s[1 : len(s)-1] } - - return reflect.ValueOf(nil), false, nil + return strconv.ParseInt(s, 10, 64) } -func getParam(r *http.Request, param string) string { - s := r.URL.Query().Get(param) - if s == "" { - s = r.FormValue(param) +// decodeString decodes s into a byte slice. If s has an 0x prefix, it is +// treated as a hex-encoded string. If it is "double quoted" it is treated as a +// JSON string value. Otherwise, s is converted to bytes directly. 
+func decodeString(s string) ([]byte, error) { + if lc := strings.ToLower(s); strings.HasPrefix(lc, "0x") { + return hex.DecodeString(lc[2:]) + } else if isQuotedString(s) { + var dec string + if err := json.Unmarshal([]byte(s), &dec); err != nil { + return nil, fmt.Errorf("invalid quoted string: %w", err) + } + return []byte(dec), nil } - return s + return []byte(s), nil } diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index 92ea6f2c0f..ee0ab5d796 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -1,6 +1,7 @@ package server import ( + "context" "encoding/json" "fmt" "net/http" @@ -10,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/bytes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { @@ -19,7 +19,7 @@ func TestParseJSONMap(t *testing.T) { // naive is float,string var p1 map[string]interface{} err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { + if assert.NoError(t, err) { h, ok := p1["height"].(float64) if assert.True(t, ok, "%#v", p1["height"]) { assert.EqualValues(t, 22, h) @@ -37,7 +37,7 @@ func TestParseJSONMap(t *testing.T) { "height": &tmp, } err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { + if assert.NoError(t, err) { h, ok := p2["height"].(float64) if assert.True(t, ok, "%#v", p2["height"]) { assert.EqualValues(t, 22, h) @@ -59,7 +59,7 @@ func TestParseJSONMap(t *testing.T) { Value: &bytes.HexBytes{}, } err = json.Unmarshal(input, &p3) - if assert.Nil(t, err) { + if assert.NoError(t, err) { h, ok := p3.Height.(*int) if assert.True(t, ok, "%#v", p3.Height) { assert.Equal(t, 22, *h) @@ -76,7 +76,7 @@ func TestParseJSONMap(t *testing.T) { Height int `json:"height"` }{} err = json.Unmarshal(input, &p4) - if assert.Nil(t, err) { + if assert.NoError(t, err) { assert.EqualValues(t, 22, p4.Height) assert.EqualValues(t, []byte{0x12, 0x34}, p4.Value) } @@ -85,16 +85,16 @@ 
func TestParseJSONMap(t *testing.T) { // dynamic keys on map, and we can deserialize to the desired types var p5 map[string]*json.RawMessage err = json.Unmarshal(input, &p5) - if assert.Nil(t, err) { + if assert.NoError(t, err) { var h int err = json.Unmarshal(*p5["height"], &h) - if assert.Nil(t, err) { + if assert.NoError(t, err) { assert.Equal(t, 22, h) } var v bytes.HexBytes err = json.Unmarshal(*p5["value"], &v) - if assert.Nil(t, err) { + if assert.NoError(t, err) { assert.Equal(t, bytes.HexBytes{0x12, 0x34}, v) } } @@ -106,7 +106,7 @@ func TestParseJSONArray(t *testing.T) { // naive is float,string var p1 []interface{} err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { + if assert.NoError(t, err) { v, ok := p1[0].(string) if assert.True(t, ok, "%#v", p1[0]) { assert.EqualValues(t, "1234", v) @@ -121,7 +121,7 @@ func TestParseJSONArray(t *testing.T) { tmp := 0 p2 := []interface{}{&bytes.HexBytes{}, &tmp} err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { + if assert.NoError(t, err) { v, ok := p2[0].(*bytes.HexBytes) if assert.True(t, ok, "%#v", p2[0]) { assert.EqualValues(t, []byte{0x12, 0x34}, *v) @@ -134,8 +134,8 @@ func TestParseJSONArray(t *testing.T) { } func TestParseJSONRPC(t *testing.T) { - demo := func(ctx *rpctypes.Context, height int, name string) {} - call := NewRPCFunc(demo, "height,name", false) + demo := func(ctx context.Context, height int, name string) error { return nil } + call := NewRPCFunc(demo, "height", "name") cases := []struct { raw string @@ -153,17 +153,17 @@ func TestParseJSONRPC(t *testing.T) { {`[7,"flew",100]`, 0, "", true}, {`{"name": -12, "height": "fred"}`, 0, "", true}, } + ctx := context.Background() for idx, tc := range cases { i := strconv.Itoa(idx) - data := []byte(tc.raw) - vals, err := jsonParamsToArgs(call, data) + vals, err := parseParams(ctx, call, []byte(tc.raw)) if tc.fail { - assert.NotNil(t, err, i) + assert.Error(t, err, i) } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 
2, len(vals), i) { - assert.Equal(t, tc.height, vals[0].Int(), i) - assert.Equal(t, tc.name, vals[1].String(), i) + assert.NoError(t, err, "%s: %+v", i, err) + if assert.Equal(t, 3, len(vals), i) { // ctx, height, name + assert.Equal(t, tc.height, vals[1].Int(), i) + assert.Equal(t, tc.name, vals[2].String(), i) } } @@ -171,8 +171,8 @@ func TestParseJSONRPC(t *testing.T) { } func TestParseURI(t *testing.T) { - demo := func(ctx *rpctypes.Context, height int, name string) {} - call := NewRPCFunc(demo, "height,name", false) + demo := func(ctx context.Context, height int, name string) error { return nil } + call := NewRPCFunc(demo, "height", "name") cases := []struct { raw []string @@ -187,8 +187,15 @@ func TestParseURI(t *testing.T) { // can parse numbers quoted, too {[]string{`"7"`, `"flew"`}, 7, "flew", false}, {[]string{`"-10"`, `"bob"`}, -10, "bob", false}, - // cant parse strings uquoted - {[]string{`"-10"`, `bob`}, -10, "bob", true}, + // can parse strings hex-escaped, in either case + {[]string{`-9`, `0x626f62`}, -9, "bob", false}, + {[]string{`-9`, `0X646F7567`}, -9, "doug", false}, + // can parse strings unquoted (as per OpenAPI docs) + {[]string{`0`, `hey you`}, 0, "hey you", false}, + // fail for invalid numbers, strings, hex + {[]string{`"-xx"`, `bob`}, 0, "", true}, // bad number + {[]string{`"95""`, `"bob`}, 0, "", true}, // bad string + {[]string{`15`, `0xa`}, 0, "", true}, // bad hex } for idx, tc := range cases { i := strconv.Itoa(idx) @@ -198,14 +205,14 @@ func TestParseURI(t *testing.T) { tc.raw[0], tc.raw[1]) req, err := http.NewRequest("GET", url, nil) assert.NoError(t, err) - vals, err := httpParamsToArgs(call, req) + vals, err := parseURLParams(context.Background(), call, req) if tc.fail { - assert.NotNil(t, err, i) + assert.Error(t, err, i) } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 2, len(vals), i) { - assert.Equal(t, tc.height, vals[0].Int(), i) - assert.Equal(t, tc.name, vals[1].String(), i) + assert.NoError(t, 
err, "%s: %+v", i, err) + if assert.Equal(t, 3, len(vals), i) { + assert.Equal(t, tc.height, vals[1].Int(), i) + assert.Equal(t, tc.name, vals[2].String(), i) } } diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 24f3c89761..a58973c6eb 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -1,86 +1,114 @@ package server import ( + "context" + "errors" + "fmt" "net/http" "reflect" - "strings" "github.com/tendermint/tendermint/libs/log" ) -// RegisterRPCFuncs adds a route for each function in the funcMap, as well as -// general jsonrpc and websocket handlers for all functions. "result" is the -// interface on which the result objects are registered, and is popualted with -// every RPCResponse +// RegisterRPCFuncs adds a route to mux for each non-websocket function in the +// funcMap, and also a root JSON-RPC POST handler. func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) { - // HTTP endpoints - for funcName, rpcFunc := range funcMap { - mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger)) + for name, fn := range funcMap { + if fn.ws { + continue // skip websocket endpoints, not usable via GET calls + } + mux.HandleFunc("/"+name, makeHTTPHandler(fn, logger)) } - // JSONRPC endpoints + // Endpoints for POST. mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) } // Function introspection -// RPCFunc contains the introspected type information for a function +// RPCFunc contains the introspected type information for a function. type RPCFunc struct { f reflect.Value // underlying rpc function args []reflect.Type // type of each function arg returns []reflect.Type // type of each return arg argNames []string // name of each argument ws bool // websocket only - cache bool // allow the RPC response can be cached by the proxy cache server } -// NewRPCFunc wraps a function for introspection. 
-// f is the function, args are comma separated argument names -// cache is a bool value to allow the client proxy server to cache the RPC results -func NewRPCFunc(f interface{}, args string, cache bool) *RPCFunc { - return newRPCFunc(f, args, false, cache) +// NewRPCFunc constructs an RPCFunc for f, which must be a function whose type +// signature matches one of these schemes: +// +// func(context.Context, T1, T2, ...) error +// func(context.Context, T1, T2, ...) (R, error) +// +// for arbitrary types T_i and R. The number of argNames must exactly match the +// number of non-context arguments to f. Otherwise, NewRPCFunc panics. +// +// The parameter names given are used to map JSON object keys to the +// corresonding parameter of the function. The names do not need to match the +// declared names, but must match what the client sends in a request. +func NewRPCFunc(f interface{}, argNames ...string) *RPCFunc { + rf, err := newRPCFunc(f, argNames) + if err != nil { + panic("invalid RPC function: " + err.Error()) + } + return rf } -// NewWSRPCFunc wraps a function for introspection and use in the websockets. -func NewWSRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, true, false) +// NewWSRPCFunc behaves as NewRPCFunc, but marks the resulting function for use +// via websocket. +func NewWSRPCFunc(f interface{}, argNames ...string) *RPCFunc { + rf := NewRPCFunc(f, argNames...) + rf.ws = true + return rf } -func newRPCFunc(f interface{}, args string, ws bool, c bool) *RPCFunc { - var argNames []string - if args != "" { - argNames = strings.Split(args, ",") +var ( + ctxType = reflect.TypeOf((*context.Context)(nil)).Elem() + errType = reflect.TypeOf((*error)(nil)).Elem() +) + +// newRPCFunc constructs an RPCFunc for f. See the comment at NewRPCFunc. 
+func newRPCFunc(f interface{}, argNames []string) (*RPCFunc, error) { + if f == nil { + return nil, errors.New("nil function") } - return &RPCFunc{ - f: reflect.ValueOf(f), - args: funcArgTypes(f), - returns: funcReturnTypes(f), - argNames: argNames, - ws: ws, - cache: c, + + // Check the type and signature of f. + fv := reflect.ValueOf(f) + if fv.Kind() != reflect.Func { + return nil, errors.New("not a function") } -} -// return a function's argument types -func funcArgTypes(f interface{}) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumIn() - typez := make([]reflect.Type, n) - for i := 0; i < n; i++ { - typez[i] = t.In(i) + ft := fv.Type() + if np := ft.NumIn(); np == 0 { + return nil, errors.New("wrong number of parameters") + } else if ft.In(0) != ctxType { + return nil, errors.New("first parameter is not context.Context") + } else if np-1 != len(argNames) { + return nil, fmt.Errorf("have %d names for %d parameters", len(argNames), np-1) + } + + if no := ft.NumOut(); no < 1 || no > 2 { + return nil, errors.New("wrong number of results") + } else if ft.Out(no-1) != errType { + return nil, errors.New("last result is not error") } - return typez -} -// return a function's return types -func funcReturnTypes(f interface{}) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumOut() - typez := make([]reflect.Type, n) - for i := 0; i < n; i++ { - typez[i] = t.Out(i) + args := make([]reflect.Type, ft.NumIn()) + for i := 0; i < ft.NumIn(); i++ { + args[i] = ft.In(i) } - return typez + outs := make([]reflect.Type, ft.NumOut()) + for i := 0; i < ft.NumOut(); i++ { + outs[i] = ft.Out(i) + } + return &RPCFunc{ + f: fv, + args: args, + returns: outs, + argNames: argNames, + }, nil } //------------------------------------------------------------- diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index 2271d03f86..3fc86b86fe 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -3,18 +3,14 @@ package 
server import ( "context" "encoding/json" - "errors" "fmt" "net/http" - "reflect" "runtime/debug" "time" "github.com/gorilla/websocket" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -41,6 +37,7 @@ type WebsocketManager struct { // NewWebsocketManager returns a new WebsocketManager that passes a map of // functions, connection options and logger to new WS connections. func NewWebsocketManager( + logger log.Logger, funcMap map[string]*RPCFunc, wsConnOptions ...func(*wsConnection), ) *WebsocketManager { @@ -60,22 +57,18 @@ func NewWebsocketManager( return true }, }, - logger: log.NewNopLogger(), + logger: logger, wsConnOptions: wsConnOptions, } } -// SetLogger sets the logger. -func (wm *WebsocketManager) SetLogger(l log.Logger) { - wm.logger = l -} - // WebsocketHandler upgrades the request/response (via http.Hijack) and starts // the wsConnection. func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Request) { wsConn, err := wm.Upgrade(w, r, nil) if err != nil { - // TODO - return http error + // The upgrader has already reported an HTTP error to the client, so we + // need only log it. wm.logger.Error("Failed to upgrade connection", "err", err) return } @@ -87,14 +80,17 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // register connection logger := wm.logger.With("remote", wsConn.RemoteAddr()) - con := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) - wm.logger.Info("New websocket connection", "remote", con.remoteAddr) - err = con.Start() // BLOCKING - if err != nil { + conn := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) 
+ wm.logger.Info("New websocket connection", "remote", conn.remoteAddr) + + // starting the conn is blocking + if err = conn.Start(r.Context()); err != nil { wm.logger.Error("Failed to start connection", "err", err) + writeInternalError(w, err) return } - if err := con.Stop(); err != nil { + + if err := conn.Stop(); err != nil { wm.logger.Error("error while stopping connection", "error", err) } } @@ -106,7 +102,7 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // // In case of an error, the connection is stopped. type wsConnection struct { - *client.RunState + Logger log.Logger remoteAddr string baseConn *websocket.Conn @@ -119,12 +115,6 @@ type wsConnection struct { funcMap map[string]*RPCFunc - // write channel capacity - writeChanCapacity int - - // each write times out after this. - writeWait time.Duration - // Connection times out if we haven't received *anything* in this long, not even pings. readWait time.Duration @@ -154,15 +144,13 @@ func newWSConnection( options ...func(*wsConnection), ) *wsConnection { wsc := &wsConnection{ - RunState: client.NewRunState("wsConnection", logger), - remoteAddr: baseConn.RemoteAddr().String(), - baseConn: baseConn, - funcMap: funcMap, - writeWait: defaultWSWriteWait, - writeChanCapacity: defaultWSWriteChanCapacity, - readWait: defaultWSReadWait, - pingPeriod: defaultWSPingPeriod, - readRoutineQuit: make(chan struct{}), + Logger: logger, + remoteAddr: baseConn.RemoteAddr().String(), + baseConn: baseConn, + funcMap: funcMap, + readWait: defaultWSReadWait, + pingPeriod: defaultWSPingPeriod, + readRoutineQuit: make(chan struct{}), } for _, option := range options { option(wsc) @@ -179,22 +167,6 @@ func OnDisconnect(onDisconnect func(remoteAddr string)) func(*wsConnection) { } } -// WriteWait sets the amount of time to wait before a websocket write times out. -// It should only be used in the constructor - not Goroutine-safe. 
-func WriteWait(writeWait time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeWait = writeWait - } -} - -// WriteChanCapacity sets the capacity of the websocket write channel. -// It should only be used in the constructor - not Goroutine-safe. -func WriteChanCapacity(cap int) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeChanCapacity = cap - } -} - // ReadWait sets the amount of time to wait before a websocket read times out. // It should only be used in the constructor - not Goroutine-safe. func ReadWait(readWait time.Duration) func(*wsConnection) { @@ -220,25 +192,19 @@ func ReadLimit(readLimit int64) func(*wsConnection) { } // Start starts the client service routines and blocks until there is an error. -func (wsc *wsConnection) Start() error { - if err := wsc.RunState.Start(); err != nil { - return err - } - wsc.writeChan = make(chan rpctypes.RPCResponse, wsc.writeChanCapacity) +func (wsc *wsConnection) Start(ctx context.Context) error { + wsc.writeChan = make(chan rpctypes.RPCResponse, defaultWSWriteChanCapacity) // Read subscriptions/unsubscriptions to events - go wsc.readRoutine() + go wsc.readRoutine(ctx) // Write responses, BLOCKING. - wsc.writeRoutine() + wsc.writeRoutine(ctx) return nil } // Stop unsubscribes the remote from all subscriptions. func (wsc *wsConnection) Stop() error { - if err := wsc.RunState.Stop(); err != nil { - return err - } if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) } @@ -259,8 +225,6 @@ func (wsc *wsConnection) GetRemoteAddr() string { // It implements WSRPCConnection. It is Goroutine-safe. 
func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) error { select { - case <-wsc.Quit(): - return errors.New("connection was stopped") case <-ctx.Done(): return ctx.Err() case wsc.writeChan <- resp: @@ -271,9 +235,9 @@ func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPC // TryWriteRPCResponse attempts to push a response to the writeChan, but does // not block. // It implements WSRPCConnection. It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp rpctypes.RPCResponse) bool { +func (wsc *wsConnection) TryWriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) bool { select { - case <-wsc.Quit(): + case <-ctx.Done(): return false case wsc.writeChan <- resp: return true @@ -293,7 +257,7 @@ func (wsc *wsConnection) Context() context.Context { } // Read from the socket and subscribe to or unsubscribe from events -func (wsc *wsConnection) readRoutine() { +func (wsc *wsConnection) readRoutine(ctx context.Context) { // readRoutine will block until response is written or WS connection is closed writeCtx := context.Background() @@ -303,11 +267,13 @@ func (wsc *wsConnection) readRoutine() { if !ok { err = fmt.Errorf("WSJSONRPC: %v", r) } + req := rpctypes.NewRequest(uriReqID) wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err)); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + if err := wsc.WriteRPCResponse(writeCtx, + req.MakeErrorf(rpctypes.CodeInternalError, "Panic in handler: %v", err)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } - go wsc.readRoutine() + go wsc.readRoutine(ctx) } }() @@ -317,7 +283,7 @@ func (wsc *wsConnection) readRoutine() { for { select { - case <-wsc.Quit(): + case <-ctx.Done(): return default: // reset deadline for every type of message (control or data) @@ 
-333,7 +299,7 @@ func (wsc *wsConnection) readRoutine() { wsc.Logger.Error("Failed to read request", "err", err) } if err := wsc.Stop(); err != nil { - wsc.Logger.Error("Error closing websocket connection", "err", err) + wsc.Logger.Error("error closing websocket connection", "err", err) } close(wsc.readRoutineQuit) return @@ -344,15 +310,15 @@ func (wsc *wsConnection) readRoutine() { err = dec.Decode(&request) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + request.MakeErrorf(rpctypes.CodeParseError, "unmarshaling request: %v", err)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } continue } // A Notification is a Request object without an "id" member. // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == nil { + if request.IsNotification() { wsc.Logger.Debug( "WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)", "req", request, @@ -363,25 +329,24 @@ func (wsc *wsConnection) readRoutine() { // Now, fetch the RPCFunc and execute it. 
rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { - if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCMethodNotFoundError(request.ID)); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + if err := wsc.WriteRPCResponse(writeCtx, + request.MakeErrorf(rpctypes.CodeMethodNotFound, request.Method)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } continue } - ctx := &rpctypes.Context{JSONReq: &request, WSConn: wsc} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) - if err != nil { - if err := wsc.WriteRPCResponse(writeCtx, - rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), - ); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) - } - continue + fctx := rpctypes.WithCallInfo(wsc.Context(), &rpctypes.CallInfo{ + RPCRequest: &request, + WSConn: wsc, + }) + args, err := parseParams(fctx, rpcFunc, request.Params) + if err != nil { + if err := wsc.WriteRPCResponse(writeCtx, request.MakeErrorf(rpctypes.CodeInvalidParams, + "converting JSON parameters: %v", err)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } - args = append(args, fnArgs...) 
+ continue } returns := rpcFunc.f.Call(args) @@ -391,38 +356,20 @@ func (wsc *wsConnection) readRoutine() { var resp rpctypes.RPCResponse result, err := unreflectResult(returns) - switch e := err.(type) { - // if no error then return a success response - case nil: - resp = rpctypes.NewRPCSuccessResponse(request.ID, result) - - // if this already of type RPC error then forward that error - case *rpctypes.RPCError: - resp = rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) - - default: // we need to unwrap the error and parse it accordingly - switch errors.Unwrap(err) { - // check if the error was due to an invald request - case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, - coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: - resp = rpctypes.RPCInvalidRequestError(request.ID, err) - - // lastly default all remaining errors as internal errors - default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - resp = rpctypes.RPCInternalError(request.ID, err) - } + if err == nil { + resp = request.MakeResponse(result) + } else { + resp = request.MakeError(err) } - if err := wsc.WriteRPCResponse(writeCtx, resp); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + wsc.Logger.Error("error writing RPC response", "err", err) } - } } } // receives on a write channel and writes out on the socket -func (wsc *wsConnection) writeRoutine() { +func (wsc *wsConnection) writeRoutine(ctx context.Context) { pingTicker := time.NewTicker(wsc.pingPeriod) defer pingTicker.Stop() @@ -438,7 +385,7 @@ func (wsc *wsConnection) writeRoutine() { for { select { - case <-wsc.Quit(): + case <-ctx.Done(): return case <-wsc.readRoutineQuit: // error in readRoutine return @@ -454,13 +401,13 @@ func (wsc *wsConnection) writeRoutine() { return } case msg := <-wsc.writeChan: - jsonBytes, err := json.MarshalIndent(msg, "", " ") + data, err := json.Marshal(msg) if err != nil { - wsc.Logger.Error("Failed to 
marshal RPCResponse to JSON", "err", err) + wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "msg", msg, "err", err) continue } - if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { - wsc.Logger.Error("Failed to write response", "err", err, "msg", msg) + if err = wsc.writeMessageWithDeadline(websocket.TextMessage, data); err != nil { + wsc.Logger.Error("Failed to write response", "msg", msg, "err", err) return } } @@ -471,7 +418,7 @@ func (wsc *wsConnection) writeRoutine() { // If some writes don't set it while others do, they may timeout incorrectly // (https://github.com/tendermint/tendermint/issues/553) func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { - if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { + if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(defaultWSWriteWait)); err != nil { return err } return wsc.baseConn.WriteMessage(msgType, msg) diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go index b691172a40..ae73a953be 100644 --- a/rpc/jsonrpc/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -1,10 +1,12 @@ package server import ( + "context" "net/http" "net/http/httptest" "testing" + "github.com/fortytw2/leaktest" "github.com/gorilla/websocket" "github.com/stretchr/testify/require" @@ -13,9 +15,13 @@ import ( ) func TestWebsocketManagerHandler(t *testing.T) { - s := newWSServer() + logger := log.NewNopLogger() + + s := newWSServer(t, logger) defer s.Close() + t.Cleanup(leaktest.Check(t)) + // check upgrader works d := websocket.Dialer{} c, dialResp, err := d.Dial("ws://"+s.Listener.Addr().String()+"/websocket", nil) @@ -26,14 +32,9 @@ func TestWebsocketManagerHandler(t *testing.T) { } // check basic functionality works - req, err := rpctypes.MapToRequest( - rpctypes.JSONRPCStringID("TestWebsocketManager"), - "c", - map[string]interface{}{"s": "a", "i": 10}, - ) - 
require.NoError(t, err) - err = c.WriteJSON(req) - require.NoError(t, err) + req := rpctypes.NewRequest(1001) + require.NoError(t, req.SetMethodAndParams("c", map[string]interface{}{"s": "a", "i": 10})) + require.NoError(t, c.WriteJSON(req)) var resp rpctypes.RPCResponse err = c.ReadJSON(&resp) @@ -42,15 +43,18 @@ func TestWebsocketManagerHandler(t *testing.T) { dialResp.Body.Close() } -func newWSServer() *httptest.Server { +func newWSServer(t *testing.T, logger log.Logger) *httptest.Server { funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewWSRPCFunc(func(ctx context.Context, s string, i int) (string, error) { return "foo", nil }, "s", "i"), } - wm := NewWebsocketManager(funcMap) - wm.SetLogger(log.TestingLogger()) + wm := NewWebsocketManager(logger, funcMap) mux := http.NewServeMux() mux.HandleFunc("/websocket", wm.WebsocketHandler) - return httptest.NewServer(mux) + srv := httptest.NewServer(mux) + + t.Cleanup(srv.Close) + + return srv } diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index d348e1639c..844fa96dc0 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -1,20 +1,22 @@ package main import ( + "context" "fmt" "net/http" + "os" + "os/signal" + "syscall" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var routes = map[string]*rpcserver.RPCFunc{ - "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num", false), + "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name", "num"), } -func HelloWorld(ctx *rpctypes.Context, name string, num int) (Result, error) { +func HelloWorld(ctx context.Context, name string, num int) (Result, error) { return Result{fmt.Sprintf("hi %s %d", name, num)}, nil } @@ -25,20 +27,22 @@ type Result struct { func main() { 
var ( mux = http.NewServeMux() - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) ) - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() {}) + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() rpcserver.RegisterRPCFuncs(mux, routes, logger) config := rpcserver.DefaultConfig() listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections) if err != nil { - tmos.Exit(err.Error()) + logger.Error("rpc listening", "err", err) + os.Exit(1) } - if err = rpcserver.Serve(listener, mux, logger, config); err != nil { - tmos.Exit(err.Error()) + if err = rpcserver.Serve(ctx, listener, mux, logger, config); err != nil { + logger.Error("rpc serve", "err", err) + os.Exit(1) } } diff --git a/rpc/jsonrpc/types/types.go b/rpc/jsonrpc/types/types.go index 4435c8c5d5..0c0500bf0b 100644 --- a/rpc/jsonrpc/types/types.go +++ b/rpc/jsonrpc/types/types.go @@ -1,137 +1,176 @@ package types import ( + "bytes" "context" "encoding/json" + "errors" "fmt" "net/http" - "reflect" + "regexp" + "strconv" "strings" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/rpc/coretypes" ) -// a wrapper to emulate a sum type: jsonrpcid = string | int -// TODO: refactor when Go 2.0 arrives https://github.com/golang/go/issues/19412 -type jsonrpcid interface { - isJSONRPCID() -} +// ErrorCode is the type of JSON-RPC error codes. 
+type ErrorCode int -// JSONRPCStringID a wrapper for JSON-RPC string IDs -type JSONRPCStringID string - -func (JSONRPCStringID) isJSONRPCID() {} -func (id JSONRPCStringID) String() string { return string(id) } - -// JSONRPCIntID a wrapper for JSON-RPC integer IDs -type JSONRPCIntID int - -func (JSONRPCIntID) isJSONRPCID() {} -func (id JSONRPCIntID) String() string { return fmt.Sprintf("%d", id) } - -func idFromInterface(idInterface interface{}) (jsonrpcid, error) { - switch id := idInterface.(type) { - case string: - return JSONRPCStringID(id), nil - case float64: - // json.Unmarshal uses float64 for all numbers - // (https://golang.org/pkg/encoding/json/#Unmarshal), - // but the JSONRPC2.0 spec says the id SHOULD NOT contain - // decimals - so we truncate the decimals here. - return JSONRPCIntID(int(id)), nil - default: - typ := reflect.TypeOf(id) - return nil, fmt.Errorf("json-rpc ID (%v) is of unknown type (%v)", id, typ) +func (e ErrorCode) String() string { + if s, ok := errorCodeString[e]; ok { + return s } + return fmt.Sprintf("server error: code %d", e) +} + +// Constants defining the standard JSON-RPC error codes. 
+const ( + CodeParseError ErrorCode = -32700 // Invalid JSON received by the server + CodeInvalidRequest ErrorCode = -32600 // The JSON sent is not a valid request object + CodeMethodNotFound ErrorCode = -32601 // The method does not exist or is unavailable + CodeInvalidParams ErrorCode = -32602 // Invalid method parameters + CodeInternalError ErrorCode = -32603 // Internal JSON-RPC error +) + +var errorCodeString = map[ErrorCode]string{ + CodeParseError: "Parse error", + CodeInvalidRequest: "Invalid request", + CodeMethodNotFound: "Method not found", + CodeInvalidParams: "Invalid params", + CodeInternalError: "Internal error", } //---------------------------------------- // REQUEST type RPCRequest struct { - JSONRPC string `json:"jsonrpc"` - ID jsonrpcid `json:"id,omitempty"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} + id json.RawMessage + + Method string + Params json.RawMessage } -// UnmarshalJSON custom JSON unmarshaling due to jsonrpcid being string or int -func (req *RPCRequest) UnmarshalJSON(data []byte) error { - unsafeReq := struct { - JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} - }{} - - err := json.Unmarshal(data, &unsafeReq) - if err != nil { - return err - } +// NewRequest returns an empty request with the specified ID. +func NewRequest(id int) RPCRequest { + return RPCRequest{id: []byte(strconv.Itoa(id))} +} - if unsafeReq.ID == nil { // notification - return nil - } +// ID returns a string representation of the request ID. +func (req RPCRequest) ID() string { return string(req.id) } - req.JSONRPC = unsafeReq.JSONRPC - req.Method = unsafeReq.Method - req.Params = unsafeReq.Params - id, err := idFromInterface(unsafeReq.ID) - if err != nil { +// IsNotification reports whether req is a notification (has an empty ID). 
+func (req RPCRequest) IsNotification() bool { return len(req.id) == 0 } + +type rpcRequestJSON struct { + V string `json:"jsonrpc"` // must be "2.0" + ID json.RawMessage `json:"id,omitempty"` + M string `json:"method"` + P json.RawMessage `json:"params"` +} + +// isNullOrEmpty reports whether data is empty or the JSON "null" value. +func isNullOrEmpty(data json.RawMessage) bool { + return len(data) == 0 || bytes.Equal(data, []byte("null")) +} + +// validID matches the text of a JSON value that is allowed to serve as a +// JSON-RPC request ID. Precondition: Target value is legal JSON. +var validID = regexp.MustCompile(`^(?:".*"|-?\d+)$`) + +// UnmarshalJSON decodes a request from a JSON-RPC 2.0 request object. +func (req *RPCRequest) UnmarshalJSON(data []byte) error { + var wrapper rpcRequestJSON + if err := json.Unmarshal(data, &wrapper); err != nil { return err + } else if wrapper.V != "" && wrapper.V != "2.0" { + return fmt.Errorf("invalid version: %q", wrapper.V) } - req.ID = id + if !isNullOrEmpty(wrapper.ID) { + if !validID.Match(wrapper.ID) { + return fmt.Errorf("invalid request ID: %q", string(wrapper.ID)) + } + req.id = wrapper.ID + } + req.Method = wrapper.M + req.Params = wrapper.P return nil } -func NewRPCRequest(id jsonrpcid, method string, params json.RawMessage) RPCRequest { - return RPCRequest{ - JSONRPC: "2.0", - ID: id, - Method: method, - Params: params, - } +// MarshalJSON marshals a request with the appropriate version tag. 
+func (req RPCRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(rpcRequestJSON{ + V: "2.0", + ID: req.id, + M: req.Method, + P: req.Params, + }) } func (req RPCRequest) String() string { - return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID, req.Method, req.Params) + return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID(), req.Method, req.Params) } -func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { - var paramsMap = make(map[string]json.RawMessage, len(params)) - for name, value := range params { - valueJSON, err := tmjson.Marshal(value) - if err != nil { - return RPCRequest{}, err - } - paramsMap[name] = valueJSON - } - - payload, err := json.Marshal(paramsMap) +// MakeResponse constructs a success response to req with the given result. If +// there is an error marshaling result to JSON, it returns an error response. +func (req RPCRequest) MakeResponse(result interface{}) RPCResponse { + data, err := json.Marshal(result) if err != nil { - return RPCRequest{}, err + return req.MakeErrorf(CodeInternalError, "marshaling result: %v", err) } + return RPCResponse{id: req.id, Result: data} +} - return NewRPCRequest(id, method, payload), nil +// MakeErrorf constructs an error response to req with the given code and a +// message constructed by formatting msg with args. +func (req RPCRequest) MakeErrorf(code ErrorCode, msg string, args ...interface{}) RPCResponse { + return RPCResponse{ + id: req.id, + Error: &RPCError{ + Code: int(code), + Message: code.String(), + Data: fmt.Sprintf(msg, args...), + }, + } } -func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { - var paramsMap = make([]json.RawMessage, len(params)) - for i, value := range params { - valueJSON, err := tmjson.Marshal(value) - if err != nil { - return RPCRequest{}, err - } - paramsMap[i] = valueJSON +// MakeError constructs an error response to req from the given error value. 
+// This function will panic if err == nil. +func (req RPCRequest) MakeError(err error) RPCResponse { + if err == nil { + panic("cannot construct an error response for nil") + } + if e, ok := err.(*RPCError); ok { + return RPCResponse{id: req.id, Error: e} + } + if errors.Is(err, coretypes.ErrZeroOrNegativeHeight) || + errors.Is(err, coretypes.ErrZeroOrNegativePerPage) || + errors.Is(err, coretypes.ErrPageOutOfRange) || + errors.Is(err, coretypes.ErrInvalidRequest) { + return RPCResponse{id: req.id, Error: &RPCError{ + Code: int(CodeInvalidRequest), + Message: CodeInvalidRequest.String(), + Data: err.Error(), + }} } + return RPCResponse{id: req.id, Error: &RPCError{ + Code: int(CodeInternalError), + Message: CodeInternalError.String(), + Data: err.Error(), + }} +} - payload, err := json.Marshal(paramsMap) +// SetMethodAndParams updates the method and parameters of req with the given +// values, leaving the ID unchanged. +func (req *RPCRequest) SetMethodAndParams(method string, params interface{}) error { + payload, err := json.Marshal(params) if err != nil { - return RPCRequest{}, err + return err } - - return NewRPCRequest(id, method, payload), nil + req.Method = method + req.Params = payload + return nil } //---------------------------------------- @@ -152,96 +191,57 @@ func (err RPCError) Error() string { } type RPCResponse struct { - JSONRPC string `json:"jsonrpc"` - ID jsonrpcid `json:"id,omitempty"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` + id json.RawMessage + + Result json.RawMessage + Error *RPCError } -// UnmarshalJSON custom JSON unmarshaling due to jsonrpcid being string or int +// ID returns a representation of the response ID. 
+func (resp RPCResponse) ID() string { return string(resp.id) } + +type rpcResponseJSON struct { + V string `json:"jsonrpc"` // must be "2.0" + ID json.RawMessage `json:"id,omitempty"` + R json.RawMessage `json:"result,omitempty"` + E *RPCError `json:"error,omitempty"` +} + +// UnmarshalJSON decodes a response from a JSON-RPC 2.0 response object. func (resp *RPCResponse) UnmarshalJSON(data []byte) error { - unsafeResp := &struct { - JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` - }{} - err := json.Unmarshal(data, &unsafeResp) - if err != nil { - return err - } - resp.JSONRPC = unsafeResp.JSONRPC - resp.Error = unsafeResp.Error - resp.Result = unsafeResp.Result - if unsafeResp.ID == nil { - return nil - } - id, err := idFromInterface(unsafeResp.ID) - if err != nil { + var wrapper rpcResponseJSON + if err := json.Unmarshal(data, &wrapper); err != nil { return err + } else if wrapper.V != "" && wrapper.V != "2.0" { + return fmt.Errorf("invalid version: %q", wrapper.V) } - resp.ID = id - return nil -} - -func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { - var rawMsg json.RawMessage - if res != nil { - var js []byte - js, err := tmjson.Marshal(res) - if err != nil { - return RPCInternalError(id, fmt.Errorf("error marshaling response: %w", err)) + if !isNullOrEmpty(wrapper.ID) { + if !validID.Match(wrapper.ID) { + return fmt.Errorf("invalid response ID: %q", string(wrapper.ID)) } - rawMsg = json.RawMessage(js) + resp.id = wrapper.ID } - - return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} + resp.Error = wrapper.E + resp.Result = wrapper.R + return nil } -func NewRPCErrorResponse(id jsonrpcid, code int, msg string, data string) RPCResponse { - return RPCResponse{ - JSONRPC: "2.0", - ID: id, - Error: &RPCError{Code: code, Message: msg, Data: data}, - } +// MarshalJSON marshals a response with the appropriate version tag. 
+func (resp RPCResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(rpcResponseJSON{ + V: "2.0", + ID: resp.id, + R: resp.Result, + E: resp.Error, + }) } func (resp RPCResponse) String() string { if resp.Error == nil { - return fmt.Sprintf("RPCResponse{%s %X}", resp.ID, resp.Result) + return fmt.Sprintf("RPCResponse{%s %X}", resp.ID(), resp.Result) } - return fmt.Sprintf("RPCResponse{%s %v}", resp.ID, resp.Error) -} - -// From the JSON-RPC 2.0 spec: -// If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. -func RPCParseError(err error) RPCResponse { - return NewRPCErrorResponse(nil, -32700, "Parse error", err.Error()) -} - -// From the JSON-RPC 2.0 spec: -// If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. -func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) -} - -func RPCMethodNotFoundError(id jsonrpcid) RPCResponse { - return NewRPCErrorResponse(id, -32601, "Method not found", "") -} - -func RPCInvalidParamsError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error()) -} - -func RPCInternalError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32603, "Internal error", err.Error()) -} - -func RPCServerError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32000, "Server error", err.Error()) + return fmt.Sprintf("RPCResponse{%s %v}", resp.ID(), resp.Error) } //---------------------------------------- @@ -253,55 +253,49 @@ type WSRPCConnection interface { // WriteRPCResponse writes the response onto connection (BLOCKING). WriteRPCResponse(context.Context, RPCResponse) error // TryWriteRPCResponse tries to write the response onto connection (NON-BLOCKING). 
- TryWriteRPCResponse(RPCResponse) bool + TryWriteRPCResponse(context.Context, RPCResponse) bool // Context returns the connection's context. Context() context.Context } -// Context is the first parameter for all functions. It carries a json-rpc -// request, http request and websocket connection. -// -// - JSONReq is non-nil when JSONRPC is called over websocket or HTTP. -// - WSConn is non-nil when we're connected via a websocket. -// - HTTPReq is non-nil when URI or JSONRPC is called over HTTP. -type Context struct { - // json-rpc request - JSONReq *RPCRequest - // websocket connection - WSConn WSRPCConnection - // http request - HTTPReq *http.Request +// CallInfo carries JSON-RPC request metadata for RPC functions invoked via +// JSON-RPC. It can be recovered from the context with GetCallInfo. +type CallInfo struct { + RPCRequest *RPCRequest // non-nil for requests via HTTP or websocket + HTTPRequest *http.Request // non-nil for requests via HTTP + WSConn WSRPCConnection // non-nil for requests via websocket +} + +type callInfoKey struct{} + +// WithCallInfo returns a child context of ctx with the ci attached. +func WithCallInfo(ctx context.Context, ci *CallInfo) context.Context { + return context.WithValue(ctx, callInfoKey{}, ci) } -// RemoteAddr returns the remote address (usually a string "IP:port"). -// If neither HTTPReq nor WSConn is set, an empty string is returned. -// HTTP: -// http.Request#RemoteAddr -// WS: -// result of GetRemoteAddr -func (ctx *Context) RemoteAddr() string { - if ctx.HTTPReq != nil { - return ctx.HTTPReq.RemoteAddr - } else if ctx.WSConn != nil { - return ctx.WSConn.GetRemoteAddr() +// GetCallInfo returns the CallInfo record attached to ctx, or nil if ctx does +// not contain a call record. +func GetCallInfo(ctx context.Context) *CallInfo { + if v := ctx.Value(callInfoKey{}); v != nil { + return v.(*CallInfo) } - return "" + return nil } -// Context returns the request's context. 
-// The returned context is always non-nil; it defaults to the background context. -// HTTP: -// The context is canceled when the client's connection closes, the request -// is canceled (with HTTP/2), or when the ServeHTTP method returns. -// WS: -// The context is canceled when the client's connections closes. -func (ctx *Context) Context() context.Context { - if ctx.HTTPReq != nil { - return ctx.HTTPReq.Context() - } else if ctx.WSConn != nil { - return ctx.WSConn.Context() +// RemoteAddr returns the remote address (usually a string "IP:port"). If +// neither HTTPRequest nor WSConn is set, an empty string is returned. +// +// For HTTP requests, this reports the request's RemoteAddr. +// For websocket requests, this reports the connection's GetRemoteAddr. +func (ci *CallInfo) RemoteAddr() string { + if ci == nil { + return "" + } else if ci.HTTPRequest != nil { + return ci.HTTPRequest.RemoteAddr + } else if ci.WSConn != nil { + return ci.WSConn.GetRemoteAddr() } - return context.Background() + return "" } //---------------------------------------- diff --git a/rpc/jsonrpc/types/types_test.go b/rpc/jsonrpc/types/types_test.go index d57a0403d9..d5be2f74dd 100644 --- a/rpc/jsonrpc/types/types_test.go +++ b/rpc/jsonrpc/types/types_test.go @@ -2,69 +2,59 @@ package types import ( "encoding/json" - "errors" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type SampleResult struct { Value string } -type responseTest struct { - id jsonrpcid - expected string -} - -var responseTests = []responseTest{ - {JSONRPCStringID("1"), `"1"`}, - {JSONRPCStringID("alphabet"), `"alphabet"`}, - {JSONRPCStringID(""), `""`}, - {JSONRPCStringID("àáâ"), `"àáâ"`}, - {JSONRPCIntID(-1), "-1"}, - {JSONRPCIntID(0), "0"}, - {JSONRPCIntID(1), "1"}, - {JSONRPCIntID(100), "100"}, +// Valid JSON identifier texts. 
+var testIDs = []string{ + `"1"`, `"alphabet"`, `""`, `"àáâ"`, "-1", "0", "1", "100", } func TestResponses(t *testing.T) { - assert := assert.New(t) - for _, tt := range responseTests { - jsonid := tt.id - a := NewRPCSuccessResponse(jsonid, &SampleResult{"hello"}) - b, _ := json.Marshal(a) - s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected) - assert.Equal(s, string(b)) + for _, id := range testIDs { + req := RPCRequest{id: json.RawMessage(id)} - d := RPCParseError(errors.New("hello world")) - e, _ := json.Marshal(d) - f := `{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":"hello world"}}` - assert.Equal(f, string(e)) + a := req.MakeResponse(&SampleResult{"hello"}) + b, err := json.Marshal(a) + require.NoError(t, err, "input id: %q", id) + s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, id) + assert.Equal(t, s, string(b)) - g := RPCMethodNotFoundError(jsonid) - h, _ := json.Marshal(g) - i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, tt.expected) - assert.Equal(string(h), i) + d := req.MakeErrorf(CodeParseError, "hello world") + e, err := json.Marshal(d) + require.NoError(t, err) + f := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error","data":"hello world"}}`, id) + assert.Equal(t, f, string(e)) + + g := req.MakeErrorf(CodeMethodNotFound, "foo") + h, err := json.Marshal(g) + require.NoError(t, err) + i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found","data":"foo"}}`, id) + assert.Equal(t, string(h), i) } } func TestUnmarshallResponses(t *testing.T) { - assert := assert.New(t) - for _, tt := range responseTests { + for _, id := range testIDs { response := &RPCResponse{} - err := json.Unmarshal( - []byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)), - response, - ) - assert.Nil(err) - a := NewRPCSuccessResponse(tt.id, 
&SampleResult{"hello"}) - assert.Equal(*response, a) + input := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, id) + require.NoError(t, json.Unmarshal([]byte(input), &response)) + + req := RPCRequest{id: json.RawMessage(id)} + a := req.MakeResponse(&SampleResult{"hello"}) + assert.Equal(t, *response, a) } - response := &RPCResponse{} - err := json.Unmarshal([]byte(`{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}`), response) - assert.NotNil(err) + var response RPCResponse + const input = `{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}` + require.Error(t, json.Unmarshal([]byte(input), &response)) } func TestRPCError(t *testing.T) { diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 83d85be8fa..ba48ee82ef 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -296,28 +296,28 @@ paths: Events: []abci.Event{ { Type: "rewards.withdraw", - Attributes: abci.EventAttribute{ - {Key: []byte("address"), Value: []byte("AddrA"), Index: true}, - {Key: []byte("source"), Value: []byte("SrcX"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - {Key: []byte("balance"), Value: []byte("..."), Index: true}, + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "AddrA", Index: true}, + {Key: "source", Value: "SrcX", Index: true}, + {Key: "amount", Value: "...", Index: true}, + {Key: "balance", Value: "...", Index: true}, }, }, { Type: "rewards.withdraw", - Attributes: abci.EventAttribute{ - {Key: []byte("address"), Value: []byte("AddrB"), Index: true}, - {Key: []byte("source"), Value: []byte("SrcY"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, - {Key: []byte("balance"), Value: []byte("..."), Index: true}, + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "AddrB", Index: true}, + {Key: "source", Value: "SrcY", Index: true}, + {Key: "amount", Value: "...", Index: true}, + {Key: "balance", Value: "...", Index: true}, }, }, { Type: 
"transfer", - Attributes: abci.EventAttribute{ - {Key: []byte("sender"), Value: []byte("AddrC"), Index: true}, - {Key: []byte("recipient"), Value: []byte("AddrD"), Index: true}, - {Key: []byte("amount"), Value: []byte("..."), Index: true}, + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "AddrC", Index: true}, + {Key: "recipient", Value: "AddrD", Index: true}, + {Key: "amount", Value: "...", Index: true}, }, }, }, @@ -365,10 +365,14 @@ paths: import rpchttp "github.com/tendermint/rpc/client/http" import "github.com/tendermint/tendermint/types" - client := rpchttp.New("tcp://0.0.0.0:26657") - err := client.Start() + client, err := rpchttp.New("tcp://0.0.0.0:26657", "/websocket") if err != nil { - handle error + // handle error + } + + err = client.Start() + if err != nil { + // handle error } defer client.Stop() ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) @@ -376,7 +380,7 @@ paths: query := "tm.event = 'Tx' AND tx.height = 3" txs, err := client.Subscribe(ctx, "test-client", query) if err != nil { - handle error + // handle error } go func() { @@ -422,16 +426,20 @@ paths: operationId: unsubscribe description: | ```go - client := rpchttp.New("tcp://0.0.0.0:26657") + client, err := rpchttp.New("tcp://0.0.0.0:26657", "/websocket") + if err != nil { + // handle error + } + err := client.Start() if err != nil { - handle error + // handle error } defer client.Stop() query := "tm.event = 'Tx' AND tx.height = 3" err = client.Unsubscribe(context.Background(), "test-client", query) if err != nil { - handle error + // handle error } ``` parameters: @@ -525,7 +533,7 @@ paths: $ref: "#/components/schemas/ErrorResponse" /net_info: get: - summary: Network informations + summary: Network information operationId: net_info tags: - Info @@ -693,6 +701,64 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /header: + get: + summary: Get the header at a specified height + operationId: header + parameters: + - in: 
query + name: height + schema: + type: integer + default: 0 + example: 1 + description: height to return. If no height is provided, it will fetch the latest height. + tags: + - Info + description: | + Retrieve the block header corresponding to a specified height. + responses: + "200": + description: Header information. + content: + application/json: + schema: + $ref: "#/components/schemas/HeaderResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /header_by_hash: + get: + summary: Get header by hash + operationId: header_by_hash + parameters: + - in: query + name: hash + description: header hash + required: true + schema: + type: string + example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + tags: + - Info + description: | + Retrieve the block header corresponding to a block hash. + responses: + "200": + description: Header information. + content: + application/json: + schema: + $ref: "#/components/schemas/HeaderResponse" + "500": + description: Error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" /block: get: summary: Get block at a specified height @@ -711,7 +777,7 @@ paths: Get Block. responses: "200": - description: Block informations. + description: Block information. content: application/json: schema: @@ -740,7 +806,7 @@ paths: Get Block By Hash. responses: "200": - description: Block informations. + description: Block information. content: application/json: schema: @@ -758,7 +824,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch informations regarding the latest block. + description: height to return. If no height is provided, it will fetch information regarding the latest block. schema: type: integer default: 0 @@ -787,7 +853,7 @@ paths: parameters: - in: query name: height - description: height to return. 
If no height is provided, it will fetch commit informations regarding the latest block. + description: height to return. If no height is provided, it will fetch commit information regarding the latest block. schema: type: integer default: 0 @@ -968,7 +1034,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. + description: height to return. If no height is provided, it will fetch commit information regarding the latest block. schema: type: integer default: 0 @@ -996,13 +1062,21 @@ paths: operationId: unconfirmed_txs parameters: - in: query - name: limit - description: Maximum number of unconfirmed transactions to return (max 100) + name: page + description: "Page number (1-based)" required: false schema: type: integer - default: 30 + default: 1 example: 1 + - in: query + name: per_page + description: "Number of entries per page (max: 100)" + required: false + schema: + type: integer + example: 100 + default: 30 tags: - Info description: | @@ -1423,25 +1497,25 @@ components: example: "0" total_snapshots: type: string - example: "10" + example: "10" chunk_process_avg_time: type: string - example: "1000000000" + example: "1000000000" snapshot_height: type: string - example: "1262196" + example: "1262196" snapshot_chunks_count: type: string - example: "10" + example: "10" snapshot_chunks_total: type: string - example: "100" + example: "100" backfilled_blocks: type: string - example: "10" + example: "10" backfill_blocks_total: type: string - example: "100" + example: "100" ValidatorInfo: type: object properties: @@ -1703,13 +1777,21 @@ components: block: $ref: "#/components/schemas/Block" BlockResponse: - description: Blockc info + description: Block info allOf: - $ref: "#/components/schemas/JSONRPC" - type: object properties: result: $ref: "#/components/schemas/BlockComplete" + HeaderResponse: + description: Block Header info + allOf: + - $ref: 
"#/components/schemas/JSONRPC" + - type: object + properties: + result: + $ref: "#/components/schemas/BlockHeader" ################## FROM NOW ON NEEDS REFACTOR ################## BlockResultsResponse: diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 189289b0a8..42f78c01f9 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -32,7 +32,7 @@ func waitForRPC(ctx context.Context, conf *config.Config) { } result := new(coretypes.ResultStatus) for { - _, err := client.Call(ctx, "status", map[string]interface{}{}, result) + err := client.Call(ctx, "status", map[string]interface{}{}, result) if err == nil { return } @@ -57,23 +57,29 @@ func makeAddrs() (p2pAddr, rpcAddr string) { return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } -func CreateConfig(testName string) *config.Config { - c := config.ResetTestRoot(testName) +func CreateConfig(testName string) (*config.Config, error) { + c, err := config.ResetTestRoot(testName) + if err != nil { + return nil, err + } p2pAddr, rpcAddr := makeAddrs() c.P2P.ListenAddress = p2pAddr c.RPC.ListenAddress = rpcAddr c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - return c + return c, nil } type ServiceCloser func(context.Context) error -func StartTendermint(ctx context.Context, +func StartTendermint( + ctx context.Context, conf *config.Config, app abci.Application, - opts ...func(*Options)) (service.Service, ServiceCloser, error) { + opts ...func(*Options), +) (service.Service, ServiceCloser, error) { + ctx, cancel := context.WithCancel(ctx) nodeOpts := &Options{} for _, opt := range opts { @@ -83,17 +89,17 @@ func StartTendermint(ctx context.Context, if nodeOpts.suppressStdout { logger = log.NewNopLogger() } else { - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) } papp := abciclient.NewLocalCreator(app) - tmNode, err := 
node.New(conf, logger, papp, nil) + tmNode, err := node.New(ctx, conf, logger, papp, nil) if err != nil { - return nil, func(_ context.Context) error { return nil }, err + return nil, func(_ context.Context) error { cancel(); return nil }, err } - err = tmNode.Start() + err = tmNode.Start(ctx) if err != nil { - return nil, func(_ context.Context) error { return nil }, err + return nil, func(_ context.Context) error { cancel(); return nil }, err } waitForRPC(ctx, conf) @@ -103,9 +109,7 @@ func StartTendermint(ctx context.Context, } return tmNode, func(ctx context.Context) error { - if err := tmNode.Stop(); err != nil { - logger.Error("Error when trying to stop node", "err", err) - } + cancel() tmNode.Wait() os.RemoveAll(conf.RootDir) return nil diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index 6b60ac2fc7..e8d3fcf932 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -9,13 +9,13 @@ package main import ( "bufio" + "encoding/json" "fmt" "io" "os" "strings" "github.com/tendermint/tendermint/internal/consensus" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -27,13 +27,13 @@ func main() { f, err := os.Open(os.Args[1]) if err != nil { - panic(fmt.Errorf("failed to open WAL file: %v", err)) + panic(fmt.Errorf("failed to open WAL file: %w", err)) } defer f.Close() walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0666) if err != nil { - panic(fmt.Errorf("failed to open WAL file: %v", err)) + panic(fmt.Errorf("failed to open WAL file: %w", err)) } defer walFile.Close() @@ -48,7 +48,7 @@ func main() { if err == io.EOF { break } else if err != nil { - panic(fmt.Errorf("failed to read file: %v", err)) + panic(fmt.Errorf("failed to read file: %w", err)) } // ignore the ENDHEIGHT in json.File if strings.HasPrefix(string(msgJSON), "ENDHEIGHT") { @@ -56,14 +56,14 @@ func main() { } var msg consensus.TimedWALMessage - err = tmjson.Unmarshal(msgJSON, &msg) + err = 
json.Unmarshal(msgJSON, &msg) if err != nil { - panic(fmt.Errorf("failed to unmarshal json: %v", err)) + panic(fmt.Errorf("failed to unmarshal json: %w", err)) } err = dec.Encode(&msg) if err != nil { - panic(fmt.Errorf("failed to encode msg: %v", err)) + panic(fmt.Errorf("failed to encode msg: %w", err)) } } } diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index cc38934145..c36ebec13c 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -1,16 +1,46 @@ #!/usr/bin/env bash +set -euo pipefail -# should be executed at the root directory of the project -set -eo pipefail +# By default, this script runs against the latest commit to the master branch +# in the Tendermint spec repository. To use this script with a different version +# of the spec repository, run it with the $VERS environment variable set to the +# desired branch name or commit hash from the spec repo. -git clone https://github.com/celestiaorg/spec +: ${VERS:="evan/tendermint-master-2-10"} -cp -r spec/proto/tendermint proto +echo "fetching proto files" -buf generate --path proto/tendermint +# Get shortened ref of commit +REF=$(curl -H "Accept: application/vnd.github.v3.sha" -qL \ + "https://api.github.com/repos/celestiaorg/spec/commits/${VERS}" \ + | cut -c -7) + +readonly OUTDIR="celestiaorg-spec-${REF}" +mkdir ${OUTDIR} +echo OUTDIR +curl -qL "https://api.github.com/repos/celestiaorg/spec/tarball/${REF}" | tar -xzf - ${OUTDIR}/ + +cp -r ${OUTDIR}/proto/tendermint/* ./proto/tendermint +cp -r ${OUTDIR}/third_party/** ./third_party + +MODNAME="$(go list -m)" +find ./proto/tendermint -name '*.proto' -not -path "./proto/tendermint/abci/types.proto" \ + -exec sh ./scripts/protopackage.sh {} "$MODNAME" ';' + +# For historical compatibility, the abci file needs to get a slightly different import name +# so that it can be moved into the ./abci/types directory. 
+sh ./scripts/protopackage.sh ./proto/tendermint/abci/types.proto $MODNAME "abci/types" + +buf generate --path proto/tendermint --template ./${OUTDIR}/buf.gen.yaml --config ./${OUTDIR}/buf.yaml mv ./proto/tendermint/abci/types.pb.go ./abci/types -find proto/tendermint/ -name "*.proto" -exec rm -rf {} \; +echo "proto files have been compiled" + +echo "removing copied files" + +find ${OUTDIR}/proto/tendermint/ -name *.proto \ + | sed "s/$OUTDIR\/\(.*\)/\1/g" \ + | xargs -I {} rm {} -rm -rf spec +rm -rf ${OUTDIR} diff --git a/scripts/protopackage.sh b/scripts/protopackage.sh new file mode 100755 index 0000000000..a69e758ca0 --- /dev/null +++ b/scripts/protopackage.sh @@ -0,0 +1,23 @@ +#!/usr/bin/sh +set -eo pipefail + +# This script appends the "option go_package" proto option to the file located at $FNAME. +# This option specifies what the package will be named when imported by other packages. +# This option is not directly included in the proto files to allow the files to more easily +# be hosted in github.com/tendermint/spec and shared between other repos. +# If the option is already specified in the file, it will be replaced using the +# arguments passed to this script. + +FNAME="${1:?missing required .proto filename}" +MODNAME=$(echo $2| sed 's/\//\\\//g') +PACKAGE="$(dirname $FNAME | sed 's/^\.\/\(.*\)/\1/g' | sed 's/\//\\\//g')" +if [[ ! -z "$3" ]]; then + PACKAGE="$(echo $3 | sed 's/\//\\\//g')" +fi + + +if ! 
grep -q 'option\s\+go_package\s\+=\s\+.*;' $FNAME; then + sed -i "s/\(package tendermint.*\)/\1\n\noption go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME +else + sed -i "s/option\s\+go_package\s\+=\s\+.*;/option go_package = \"$MODNAME\/$PACKAGE\";/g" $FNAME +fi diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 5a5a0abac3..7ee7561068 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -8,12 +8,12 @@ package main import ( + "encoding/json" "fmt" "io" "os" "github.com/tendermint/tendermint/internal/consensus" - tmjson "github.com/tendermint/tendermint/libs/json" ) func main() { @@ -24,7 +24,7 @@ func main() { f, err := os.Open(os.Args[1]) if err != nil { - panic(fmt.Errorf("failed to open WAL file: %v", err)) + panic(fmt.Errorf("failed to open WAL file: %w", err)) } defer f.Close() @@ -34,12 +34,12 @@ func main() { if err == io.EOF { break } else if err != nil { - panic(fmt.Errorf("failed to decode msg: %v", err)) + panic(fmt.Errorf("failed to decode msg: %w", err)) } - json, err := tmjson.Marshal(msg) + json, err := json.Marshal(msg) if err != nil { - panic(fmt.Errorf("failed to marshal msg: %v", err)) + panic(fmt.Errorf("failed to marshal msg: %w", err)) } _, err = os.Stdout.Write(json) diff --git a/test/Makefile b/test/Makefile index 86226cf03a..d141bb6844 100644 --- a/test/Makefile +++ b/test/Makefile @@ -3,6 +3,8 @@ ######################################## ### Testing +PACKAGES=$(shell go list ./...) + BINDIR ?= $(GOPATH)/bin ## required to be run first by most tests diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh index 034e28878d..305a2926c6 100755 --- a/test/app/kvstore_test.sh +++ b/test/app/kvstore_test.sh @@ -57,7 +57,7 @@ echo "... 
testing query with /abci_query 2" # we should be able to look up the key RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` +RESPONSE=`echo $RESPONSE | jq .response.log` set +e A=`echo $RESPONSE | grep 'exists'` @@ -70,7 +70,7 @@ set -e # we should not be able to look up the value RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` +RESPONSE=`echo $RESPONSE | jq .response.log` set +e A=`echo $RESPONSE | grep 'exists'` if [[ $? == 0 ]]; then diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 9110b4bf57..6d472db4cc 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.16 +FROM golang:1.17 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index b79cb4e5aa..ad840b6a11 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "path/filepath" + "sort" "strconv" "github.com/tendermint/tendermint/abci/example/code" @@ -79,7 +80,7 @@ func DefaultConfig(dir string) *Config { // NewApplication creates the application. func NewApplication(cfg *Config) (*Application, error) { - state, err := NewState(filepath.Join(cfg.Dir, "state.json"), cfg.PersistInterval) + state, err := NewState(cfg.Dir, cfg.PersistInterval) if err != nil { return nil, err } @@ -88,7 +89,7 @@ func NewApplication(cfg *Config) (*Application, error) { return nil, err } return &Application{ - logger: log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false), + logger: log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo), state: state, snapshots: snapshots, cfg: cfg, @@ -97,6 +98,9 @@ func NewApplication(cfg *Config) (*Application, error) { // Info implements ABCI. 
func (app *Application) Info(req abci.RequestInfo) abci.ResponseInfo { + app.state.RLock() + defer app.state.RUnlock() + return abci.ResponseInfo{ Version: version.ABCIVersion, AppVersion: 1, @@ -263,10 +267,13 @@ func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) a return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT} } -// PreprocessTxs implements ABCI -func (app *Application) PreprocessTxs( - req abci.RequestPreprocessTxs) abci.ResponsePreprocessTxs { - return abci.ResponsePreprocessTxs{Txs: req.Txs} +func (app *Application) PrepareProposal( + req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { + return abci.ResponsePrepareProposal{BlockData: req.BlockData} +} + +func (app *Application) Rollback() error { + return app.state.Rollback() } // validatorUpdates generates a validator set update. @@ -285,6 +292,14 @@ func (app *Application) validatorUpdates(height uint64) (abci.ValidatorUpdates, } valUpdates = append(valUpdates, abci.UpdateValidator(keyBytes, int64(power), app.cfg.KeyType)) } + + // the validator updates could be returned in arbitrary order, + // and that seems potentially bad. This orders the validator + // set. 
+ sort.Slice(valUpdates, func(i, j int) bool { + return valUpdates[i].PubKey.Compare(valUpdates[j].PubKey) < 0 + }) + return valUpdates, nil } diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 6a9c0e0dcf..65edbc3a56 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -48,7 +47,7 @@ func (s *SnapshotStore) loadMetadata() error { file := filepath.Join(s.dir, "metadata.json") metadata := []abci.Snapshot{} - bz, err := ioutil.ReadFile(file) + bz, err := os.ReadFile(file) switch { case errors.Is(err, os.ErrNotExist): case err != nil: @@ -75,7 +74,7 @@ func (s *SnapshotStore) saveMetadata() error { // save the file to a new file and move it to make saving atomic. newFile := filepath.Join(s.dir, "metadata.json.new") file := filepath.Join(s.dir, "metadata.json") - err = ioutil.WriteFile(newFile, bz, 0644) // nolint: gosec + err = os.WriteFile(newFile, bz, 0644) // nolint: gosec if err != nil { return err } @@ -96,7 +95,7 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { Hash: hashItems(state.Values), Chunks: byteChunks(bz), } - err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) + err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) if err != nil { return abci.Snapshot{}, err } @@ -146,7 +145,7 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([ defer s.RUnlock() for _, snapshot := range s.metadata { if snapshot.Height == height && snapshot.Format == format { - bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) + bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) if err != nil { return nil, err } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index 4419264530..e82a225397 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ 
-6,12 +6,15 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" + "path/filepath" "sort" "sync" ) +const stateFileName = "app_state.json" +const prevStateFileName = "prev_app_state.json" + // State is the application state. type State struct { sync.RWMutex @@ -20,16 +23,19 @@ type State struct { Hash []byte // private fields aren't marshaled to disk. - file string + currentFile string + // app saves current and previous state for rollback functionality + previousFile string persistInterval uint64 initialHeight uint64 } // NewState creates a new state. -func NewState(file string, persistInterval uint64) (*State, error) { +func NewState(dir string, persistInterval uint64) (*State, error) { state := &State{ Values: make(map[string]string), - file: file, + currentFile: filepath.Join(dir, stateFileName), + previousFile: filepath.Join(dir, prevStateFileName), persistInterval: persistInterval, } state.Hash = hashItems(state.Values) @@ -45,13 +51,22 @@ func NewState(file string, persistInterval uint64) (*State, error) { // load loads state from disk. It does not take out a lock, since it is called // during construction. 
func (s *State) load() error { - bz, err := ioutil.ReadFile(s.file) + bz, err := os.ReadFile(s.currentFile) if err != nil { - return fmt.Errorf("failed to read state from %q: %w", s.file, err) + // if the current state doesn't exist then we try recover from the previous state + if errors.Is(err, os.ErrNotExist) { + bz, err = os.ReadFile(s.previousFile) + if err != nil { + return fmt.Errorf("failed to read both current and previous state (%q): %w", + s.previousFile, err) + } + } else { + return fmt.Errorf("failed to read state from %q: %w", s.currentFile, err) + } } err = json.Unmarshal(bz, s) if err != nil { - return fmt.Errorf("invalid state data in %q: %w", s.file, err) + return fmt.Errorf("invalid state data in %q: %w", s.currentFile, err) } return nil } @@ -65,12 +80,19 @@ func (s *State) save() error { } // We write the state to a separate file and move it to the destination, to // make it atomic. - newFile := fmt.Sprintf("%v.new", s.file) - err = ioutil.WriteFile(newFile, bz, 0644) + newFile := fmt.Sprintf("%v.new", s.currentFile) + err = os.WriteFile(newFile, bz, 0644) if err != nil { - return fmt.Errorf("failed to write state to %q: %w", s.file, err) + return fmt.Errorf("failed to write state to %q: %w", s.currentFile, err) + } + // We take the current state and move it to the previous state, replacing it + if _, err := os.Stat(s.currentFile); err == nil { + if err := os.Rename(s.currentFile, s.previousFile); err != nil { + return fmt.Errorf("failed to replace previous state: %w", err) + } } - return os.Rename(newFile, s.file) + // Finally, we take the new state and replace the current state. + return os.Rename(newFile, s.currentFile) } // Export exports key/value pairs as JSON, used for state sync snapshots. 
@@ -136,6 +158,18 @@ func (s *State) Commit() (uint64, []byte, error) { return s.Height, s.Hash, nil } +func (s *State) Rollback() error { + bz, err := os.ReadFile(s.previousFile) + if err != nil { + return fmt.Errorf("failed to read state from %q: %w", s.previousFile, err) + } + err = json.Unmarshal(bz, s) + if err != nil { + return fmt.Errorf("invalid state data in %q: %w", s.previousFile, err) + } + return nil +} + // hashItems hashes a set of key/value items. func hashItems(items map[string]string) []byte { keys := make([]string, 0, len(items)) diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 260df23f3a..4e19fe9f8e 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.16 +FROM golang:1.17 RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 61b4bf7d38..90c19e6ffa 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -44,10 +44,6 @@ var ( "tcp": 20, "unix": 10, } - nodeMempools = weightedChoice{ - "v0": 20, - "v1": 80, - } nodeStateSyncs = weightedChoice{ e2e.StateSyncDisabled: 10, e2e.StateSyncP2P: 45, @@ -117,7 +113,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er KeyType: keyType.Choose(r).(string), Evidence: evidence.Choose(r).(int), QueueType: opt["queueType"].(string), - TxSize: int64(txSize.Choose(r).(int)), + TxSize: txSize.Choose(r).(int), } var numSeeds, numValidators, numFulls, numLightClients int @@ -277,7 +273,6 @@ func generateNode( StartAt: startAt, Database: nodeDatabases.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), - Mempool: 
nodeMempools.Choose(r), StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), @@ -285,6 +280,10 @@ func generateNode( Perturb: nodePerturbations.Choose(r), } + if node.PrivvalProtocol == "" { + node.PrivvalProtocol = "file" + } + if startAt > 0 { node.StateSync = nodeStateSyncs.Choose(r) if manifest.InitialHeight-startAt <= 5 && node.StateSync == e2e.StateSyncDisabled { diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index 79a20f27e8..0e0e66baad 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -26,15 +26,21 @@ func TestGenerator(t *testing.T) { numStateSyncs++ } t.Run(name, func(t *testing.T) { - if node.StartAt > m.InitialHeight+5 && !node.Stateless() { - require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + t.Run("StateSync", func(t *testing.T) { + if node.StartAt > m.InitialHeight+5 && !node.Stateless() { + require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + } + if node.StateSync != e2e.StateSyncDisabled { + require.Zero(t, node.Seeds, node.StateSync) + require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, + "peers: %v", node.PersistentPeers) + } + }) + if e2e.Mode(node.Mode) != e2e.ModeLight { + t.Run("PrivvalProtocol", func(t *testing.T) { + require.NotZero(t, node.PrivvalProtocol) + }) } - if node.StateSync != e2e.StateSyncDisabled { - require.Zero(t, node.Seeds, node.StateSync) - require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, - "peers: %v", node.PersistentPeers) - } - }) } require.True(t, numStateSyncs <= 2) diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 38f36d0dac..10b8092795 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -17,7 +17,7 @@ const ( randomSeed int64 = 4827085738 ) -var logger = 
log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) +var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) func main() { NewCLI().Run() diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index b5d9debe94..5a60a20bd9 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -34,10 +34,13 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) +var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) // main is the binary entrypoint. func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if len(os.Args) != 2 { fmt.Printf("Usage: %v ", os.Args[0]) return @@ -47,14 +50,14 @@ func main() { configFile = os.Args[1] } - if err := run(configFile); err != nil { + if err := run(ctx, configFile); err != nil { logger.Error(err.Error()) os.Exit(1) } } // run runs the application - basically like main() with error handling. -func run(configFile string) error { +func run(ctx context.Context, configFile string) error { cfg, err := LoadConfig(configFile) if err != nil { return err @@ -62,7 +65,7 @@ func run(configFile string) error { // Start remote signer (must start before node if running builtin). if cfg.PrivValServer != "" { - if err = startSigner(cfg); err != nil { + if err = startSigner(ctx, cfg); err != nil { return err } if cfg.Protocol == "builtin" { @@ -73,15 +76,15 @@ func run(configFile string) error { // Start app server. 
switch cfg.Protocol { case "socket", "grpc": - err = startApp(cfg) + err = startApp(ctx, cfg) case "builtin": switch cfg.Mode { case string(e2e.ModeLight): - err = startLightNode(cfg) + err = startLightNode(ctx, cfg) case string(e2e.ModeSeed): - err = startSeedNode() + err = startSeedNode(ctx) default: - err = startNode(cfg) + err = startNode(ctx, cfg) } default: err = fmt.Errorf("invalid protocol %q", cfg.Protocol) @@ -97,16 +100,16 @@ func run(configFile string) error { } // startApp starts the application server, listening for connections from Tendermint. -func startApp(cfg *Config) error { +func startApp(ctx context.Context, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err } - server, err := server.NewServer(cfg.Listen, cfg.Protocol, app) + server, err := server.NewServer(logger, cfg.Listen, cfg.Protocol, app) if err != nil { return err } - err = server.Start() + err = server.Start(ctx) if err != nil { return err } @@ -118,7 +121,7 @@ func startApp(cfg *Config) error { // configuration is in $TMHOME/config/tendermint.toml. // // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. 
-func startNode(cfg *Config) error { +func startNode(ctx context.Context, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err @@ -129,7 +132,9 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } - n, err := node.New(tmcfg, + n, err := node.New( + ctx, + tmcfg, nodeLogger, abciclient.NewLocalCreator(app), nil, @@ -137,10 +142,10 @@ func startNode(cfg *Config) error { if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startSeedNode() error { +func startSeedNode(ctx context.Context) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return fmt.Errorf("failed to setup config: %w", err) @@ -148,14 +153,14 @@ func startSeedNode() error { tmcfg.Mode = config.ModeSeed - n, err := node.New(tmcfg, nodeLogger, nil, nil) + n, err := node.New(ctx, tmcfg, nodeLogger, nil, nil) if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startLightNode(cfg *Config) error { +func startLightNode(ctx context.Context, cfg *Config) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return err @@ -204,7 +209,7 @@ func startLightNode(cfg *Config) error { } logger.Info("Starting proxy...", "laddr", tmcfg.RPC.ListenAddress) - if err := p.ListenAndServe(); err != http.ErrServerClosed { + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) } @@ -213,7 +218,7 @@ func startLightNode(cfg *Config) error { } // startSigner starts a signer server connecting to the given endpoint. 
-func startSigner(cfg *Config) error { +func startSigner(ctx context.Context, cfg *Config) error { filePV, err := privval.LoadFilePV(cfg.PrivValKey, cfg.PrivValState) if err != nil { return err @@ -241,6 +246,10 @@ func startSigner(cfg *Config) error { if err := s.Serve(lis); err != nil { panic(err) } + go func() { + <-ctx.Done() + s.GracefulStop() + }() }() return nil @@ -251,7 +260,8 @@ func startSigner(cfg *Config) error { endpoint := privval.NewSignerDialerEndpoint(logger, dialFn, privval.SignerDialerEndpointRetryWaitInterval(1*time.Second), privval.SignerDialerEndpointConnRetries(100)) - err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start() + + err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start(ctx) if err != nil { return err } @@ -287,7 +297,7 @@ func setupNode() (*config.Config, log.Logger, error) { return nil, nil, fmt.Errorf("error in config file: %w", err) } - nodeLogger, err := log.NewDefaultLogger(tmcfg.LogFormat, tmcfg.LogLevel, false) + nodeLogger, err := log.NewDefaultLogger(tmcfg.LogFormat, tmcfg.LogLevel) if err != nil { return nil, nil, err } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 16b99cfdaa..895e629395 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -64,7 +64,7 @@ type Manifest struct { QueueType string `toml:"queue_type"` // Number of bytes per tx. Default is 1kb (1024) - TxSize int64 + TxSize int // ABCIProtocol specifies the protocol used to communicate with the ABCI // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. @@ -104,10 +104,6 @@ type ManifestNode struct { // runner will wait for the network to reach at least this block height. StartAt int64 `toml:"start_at"` - // BlockSync specifies the block sync mode: "" (disable), "v0" or "v2". - // Defaults to disabled. - BlockSync string `toml:"block_sync"` - // Mempool specifies which version of mempool to use. 
Either "v0" or "v1" Mempool string `toml:"mempool_version"` diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index d0770ce8d5..f4b75c71a9 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -70,7 +70,7 @@ type Testnet struct { KeyType string Evidence int LogLevel string - TxSize int64 + TxSize int ABCIProtocol string } @@ -84,7 +84,6 @@ type Node struct { IP net.IP ProxyPort uint32 StartAt int64 - BlockSync string Mempool string StateSync string Database string @@ -177,7 +176,6 @@ func LoadTestnet(file string) (*Testnet, error) { ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, - BlockSync: "v0", Mempool: nodeManifest.Mempool, StateSync: nodeManifest.StateSync, PersistInterval: 1, @@ -335,11 +333,6 @@ func (n Node) Validate(testnet Testnet) error { } } } - switch n.BlockSync { - case "", "v0", "v2": - default: - return fmt.Errorf("invalid block sync setting %q", n.BlockSync) - } switch n.StateSync { case StateSyncDisabled, StateSyncP2P, StateSyncRPC: default: diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 25c4a1cc44..06a76a79d0 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -3,17 +3,17 @@ package main import ( "bytes" "context" + "encoding/json" "errors" "fmt" - "io/ioutil" "math/rand" + "os" "path/filepath" "time" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" e2e "github.com/tendermint/tendermint/test/e2e/pkg" @@ -33,14 +33,10 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo var targetNode *e2e.Node for _, idx := range r.Perm(len(testnet.Nodes)) { - targetNode = testnet.Nodes[idx] - - if targetNode.Mode == e2e.ModeSeed 
|| targetNode.Mode == e2e.ModeLight { - targetNode = nil - continue + if !testnet.Nodes[idx].Stateless() { + targetNode = testnet.Nodes[idx] + break } - - break } if targetNode == nil { @@ -55,15 +51,14 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } // request the latest block and validator set from the node - blockRes, err := client.Block(context.Background(), nil) + blockRes, err := client.Block(ctx, nil) if err != nil { return err } - evidenceHeight := blockRes.Block.Height - waitHeight := blockRes.Block.Height + 3 + evidenceHeight := blockRes.Block.Height - 3 nValidators := 100 - valRes, err := client.Validators(context.Background(), &evidenceHeight, nil, &nValidators) + valRes, err := client.Validators(ctx, &evidenceHeight, nil, &nValidators) if err != nil { return err } @@ -79,12 +74,8 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - wctx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, waitHeight) + // request the latest block and validator set from the node + blockRes, err = client.Block(ctx, &evidenceHeight) if err != nil { return err } @@ -92,11 +83,11 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo var ev types.Evidence for i := 1; i <= amount; i++ { if i%lightClientEvidenceRatio == 0 { - ev, err = generateLightClientAttackEvidence( + ev, err = generateLightClientAttackEvidence(ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) } else { - ev, err = generateDuplicateVoteEvidence( + ev, err = generateDuplicateVoteEvidence(ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) } @@ -104,24 +95,28 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - _, err := 
client.BroadcastEvidence(context.Background(), ev) + _, err := client.BroadcastEvidence(ctx, ev) if err != nil { return err } } - wctx, cancel = context.WithTimeout(ctx, 30*time.Second) + logger.Info("Finished sending evidence", + "node", testnet.Name, + "amount", amount, + "height", evidenceHeight, + ) + + wctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, blockRes.Block.Height+2) + // wait for the node to make progress after submitting + // evidence (3 (forged height) + 1 (progress)) + _, err = waitForNode(wctx, targetNode, evidenceHeight+4) if err != nil { return err } - logger.Info(fmt.Sprintf("Finished sending evidence (height %d)", blockRes.Block.Height+2)) - return nil } @@ -147,6 +142,7 @@ func getPrivateValidatorKeys(testnet *e2e.Testnet) ([]types.MockPV, error) { // creates evidence of a lunatic attack. The height provided is the common height. // The forged height happens 2 blocks later. 
func generateLightClientAttackEvidence( + ctx context.Context, privVals []types.MockPV, height int64, vals *types.ValidatorSet, @@ -161,7 +157,7 @@ func generateLightClientAttackEvidence( // add a new bogus validator and remove an existing one to // vary the validator set slightly - pv, conflictingVals, err := mutateValidatorSet(privVals, vals) + pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals) if err != nil { return nil, err } @@ -171,7 +167,8 @@ func generateLightClientAttackEvidence( // create a commit for the forged header blockID := makeBlockID(header.Hash(), 1000, []byte("partshash")) voteSet := types.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals) - commit, err := factory.MakeCommit(blockID, forgedHeight, 0, voteSet, pv, forgedTime) + + commit, err := factory.MakeCommit(ctx, blockID, forgedHeight, 0, voteSet, pv, forgedTime) if err != nil { return nil, err } @@ -197,6 +194,7 @@ func generateLightClientAttackEvidence( // generateDuplicateVoteEvidence picks a random validator from the val set and // returns duplicate vote evidence against the validator func generateDuplicateVoteEvidence( + ctx context.Context, privVals []types.MockPV, height int64, vals *types.ValidatorSet, @@ -207,11 +205,12 @@ func generateDuplicateVoteEvidence( if err != nil { return nil, err } - voteA, err := factory.MakeVote(privVal, chainID, valIdx, height, 0, 2, makeRandomBlockID(), time) + + voteA, err := factory.MakeVote(ctx, privVal, chainID, valIdx, height, 0, 2, makeRandomBlockID(), time) if err != nil { return nil, err } - voteB, err := factory.MakeVote(privVal, chainID, valIdx, height, 0, 2, makeRandomBlockID(), time) + voteB, err := factory.MakeVote(ctx, privVal, chainID, valIdx, height, 0, 2, makeRandomBlockID(), time) if err != nil { return nil, err } @@ -237,12 +236,12 @@ func getRandomValidatorIndex(privVals []types.MockPV, vals *types.ValidatorSet) } func readPrivKey(keyFilePath string) (crypto.PrivKey, error) { - 
keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } pvKey := privval.FilePVKey{} - err = tmjson.Unmarshal(keyJSONBytes, &pvKey) + err = json.Unmarshal(keyJSONBytes, &pvKey) if err != nil { return nil, fmt.Errorf("error reading PrivValidator key from %v: %w", keyFilePath, err) } @@ -289,9 +288,12 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc } } -func mutateValidatorSet(privVals []types.MockPV, vals *types.ValidatorSet, +func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *types.ValidatorSet, ) ([]types.PrivValidator, *types.ValidatorSet, error) { - newVal, newPrivVal := factory.RandValidator(false, 10) + newVal, newPrivVal, err := factory.Validator(ctx, 10) + if err != nil { + return nil, nil, err + } var newVals *types.ValidatorSet if vals.Size() > 2 { diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index f31b436dd4..674972d545 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -7,6 +7,7 @@ import ( "math/rand" "time" + tmrand "github.com/tendermint/tendermint/libs/rand" rpchttp "github.com/tendermint/tendermint/rpc/client/http" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" @@ -85,7 +86,7 @@ func Load(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet) error { // generation is primarily the result of backpressure from the // broadcast transaction, though there is still some timer-based // limiting. 
-func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int64, networkSize int) { +func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int, networkSize int) { timer := time.NewTimer(0) defer timer.Stop() defer close(chTx) @@ -97,16 +98,11 @@ func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSiz case <-timer.C: } - // We keep generating the same 100 keys over and over, with different values. - // This gives a reasonable load without putting too much data in the app. - id := rand.Int63() % 100 // nolint: gosec + // Constrain the key space to avoid using too much + // space, while reduce the size of the data in the app. + id := r.Int63n(100) - bz := make([]byte, txSize) - _, err := r.Read(bz) - if err != nil { - panic(fmt.Sprintf("Failed to read random bytes: %v", err)) - } - tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz)) + tx := types.Tx(fmt.Sprintf("load-%X=%s", id, tmrand.StrFromSource(r, txSize))) select { case <-ctx.Done(): diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index fb6ce4a8cb..83d1327be9 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -16,7 +16,7 @@ import ( const randomSeed = 2308084734268 -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) +var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) func main() { NewCLI().Run() @@ -61,7 +61,7 @@ func NewCLI() *CLI { logger.Info("Preserving testnet that encountered error", "err", err) } else if err := Cleanup(cli.testnet); err != nil { - logger.Error("Error cleaning up testnet contents", "err", err) + logger.Error("error cleaning up testnet contents", "err", err) } }() if err = Setup(cli.testnet); err != nil { @@ -302,7 +302,7 @@ Does not run any perbutations. 
} defer func() { if err := Cleanup(cli.testnet); err != nil { - logger.Error("Error cleaning up testnet contents", "err", err) + logger.Error("error cleaning up testnet contents", "err", err) } }() @@ -319,8 +319,7 @@ Does not run any perturbations. lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - err := Load(lctx, r, cli.testnet) - chLoadResult <- err + chLoadResult <- Load(lctx, r, cli.testnet) }() if err := Start(ctx, cli.testnet); err != nil { diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index ad5fa7a64d..4ca8cc0163 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -70,9 +70,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty clients[node.Name] = client } - wctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - result, err := client.Status(wctx) + result, err := client.Status(ctx) if err != nil { continue } @@ -130,6 +128,8 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty // waitForNode waits for a node to become available and catch up to the given block height. func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.ResultStatus, error) { + // If the node is the light client or seed node, we do not check for the last height. 
+ // The light client and seed node can be behind the full node and validator if node.Mode == e2e.ModeSeed { return nil, nil } @@ -169,7 +169,10 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R return nil, fmt.Errorf("timed out waiting for %v to reach height %v", node.Name, height) case errors.Is(err, context.Canceled): return nil, err - case err == nil && status.SyncInfo.LatestBlockHeight >= height: + // If the node is the light client, it is not essential to wait for it to catch up, but we must return status info + case err == nil && node.Mode == e2e.ModeLight: + return status, nil + case err == nil && node.Mode != e2e.ModeLight && status.SyncInfo.LatestBlockHeight >= height: return status, nil case counter%500 == 0: switch { diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 9bf76c874e..f9e08579e2 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -51,7 +50,7 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) + err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) if err != nil { return err } @@ -84,13 +83,15 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - config.WriteConfigFile(nodeDir, cfg) // panics + if err := config.WriteConfigFile(nodeDir, cfg); err != nil { + return err + } appCfg, err := MakeAppConfig(node) if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) + err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { return err } @@ -110,17 +111,23 @@ func Setup(testnet *e2e.Testnet) error { return err } - (privval.NewFilePV(node.PrivvalKey, + err = (privval.NewFilePV(node.PrivvalKey, filepath.Join(nodeDir, 
PrivvalKeyFile), filepath.Join(nodeDir, PrivvalStateFile), )).Save() + if err != nil { + return err + } // Set up a dummy validator. Tendermint requires a file PV even when not used, so we // give it a dummy such that it will fail if it actually tries to use it. - (privval.NewFilePV(ed25519.GenPrivKey(), + err = (privval.NewFilePV(ed25519.GenPrivKey(), filepath.Join(nodeDir, PrivvalDummyKeyFile), filepath.Join(nodeDir, PrivvalDummyStateFile), )).Save() + if err != nil { + return err + } } return nil @@ -290,15 +297,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { return nil, fmt.Errorf("unexpected mode %q", node.Mode) } - if node.Mempool != "" { - cfg.Mempool.Version = node.Mempool - } - - cfg.BlockSync.Enable = true - if node.BlockSync == "" { - cfg.BlockSync.Enable = false - } - switch node.StateSync { case e2e.StateSyncP2P: cfg.StateSync.Enable = true @@ -318,12 +316,12 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { } } - cfg.P2P.Seeds = "" + cfg.P2P.Seeds = "" //nolint: staticcheck for _, seed := range node.Seeds { - if len(cfg.P2P.Seeds) > 0 { - cfg.P2P.Seeds += "," + if len(cfg.P2P.Seeds) > 0 { //nolint: staticcheck + cfg.P2P.Seeds += "," //nolint: staticcheck } - cfg.P2P.Seeds += seed.AddressP2P(true) + cfg.P2P.Seeds += seed.AddressP2P(true) //nolint: staticcheck } cfg.P2P.PersistentPeers = "" @@ -413,11 +411,11 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { // FIXME Apparently there's no function to simply load a config file without // involving the entire Viper apparatus, so we'll just resort to regexps. 
- bz, err := ioutil.ReadFile(cfgPath) + bz, err := os.ReadFile(cfgPath) if err != nil { return err } bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) - return ioutil.WriteFile(cfgPath, bz, 0644) + return os.WriteFile(cfgPath, bz, 0644) } diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index 967d2519cf..43ff2eef37 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -118,8 +118,17 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { wcancel() node.HasStarted = true + + var lastNodeHeight int64 + + // If the node is a light client, we fetch its current height + if node.Mode == e2e.ModeLight { + lastNodeHeight = status.LightClientInfo.LastTrustedHeight + } else { + lastNodeHeight = status.SyncInfo.LatestBlockHeight + } logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v", - node.Name, node.ProxyPort, status.SyncInfo.LatestBlockHeight)) + node.Name, node.ProxyPort, lastNodeHeight)) } return nil diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index ac24b0cd27..0748797aa2 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -15,5 +15,5 @@ func Test(testnet *e2e.Testnet) error { return err } - return execVerbose("./build/tests", "-test.count", "1") + return execVerbose("./build/tests", "-test.count=1", "-test.v") } diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index ab6f9739e4..7234a5cde7 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -11,14 +11,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/rpc/client/http" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) +const ( + randomSeed 
= 4827085738 +) + // Tests that any initial state given in genesis has made it into the app. func TestApp_InitialState(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { if len(node.Testnet.InitialState) == 0 { return } @@ -37,7 +42,7 @@ func TestApp_InitialState(t *testing.T) { // Tests that the app hash (as reported by the app) matches the last // block and the node sync status. func TestApp_Hash(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) info, err := client.ABCIInfo(ctx) @@ -46,12 +51,15 @@ func TestApp_Hash(t *testing.T) { status, err := client.Status(ctx) require.NoError(t, err) + require.NotZero(t, status.SyncInfo.LatestBlockHeight) block, err := client.Block(ctx, &info.Response.LastBlockHeight) require.NoError(t, err) if info.Response.LastBlockHeight == block.Block.Height { - require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), + require.Equal(t, + fmt.Sprintf("%x", info.Response.LastBlockAppHash), + fmt.Sprintf("%x", block.Block.AppHash.Bytes()), "app hash does not match last block's app hash") } @@ -60,6 +68,29 @@ func TestApp_Hash(t *testing.T) { }) } +// Tests that the app and blockstore have and report the same height. 
+func TestApp_Height(t *testing.T) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { + client, err := node.Client() + require.NoError(t, err) + info, err := client.ABCIInfo(ctx) + require.NoError(t, err) + require.NotZero(t, info.Response.LastBlockHeight) + + status, err := client.Status(ctx) + require.NoError(t, err) + require.NotZero(t, status.SyncInfo.LatestBlockHeight) + + block, err := client.Block(ctx, &info.Response.LastBlockHeight) + require.NoError(t, err) + + require.Equal(t, info.Response.LastBlockHeight, block.Block.Height) + + require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight, + "status out of sync with application") + }) +} + // Tests that we can set a value and retrieve it. func TestApp_Tx(t *testing.T) { type broadcastFunc func(context.Context, types.Tx) error @@ -113,26 +144,19 @@ func TestApp_Tx(t *testing.T) { }, } + r := rand.New(rand.NewSource(randomSeed)) for idx, test := range testCases { if test.ShouldSkip { continue } t.Run(test.Name, func(t *testing.T) { - // testNode calls t.Parallel as well, so we should - // have a copy of the test := testCases[idx] - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) - // Generate a random value, to prevent duplicate tx errors when - // manually running the test multiple times for a testnet. 
- bz := make([]byte, 32) - _, err = rand.Read(bz) - require.NoError(t, err) - key := fmt.Sprintf("testapp-tx-%v", node.Name) - value := fmt.Sprintf("%x", bz) + value := tmrand.StrFromSource(r, 32) tx := types.Tx(fmt.Sprintf("%v=%v", key, value)) require.NoError(t, test.BroadcastTx(client)(ctx, tx)) diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go index f83cf37577..509605cde5 100644 --- a/test/e2e/tests/block_test.go +++ b/test/e2e/tests/block_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -11,8 +12,11 @@ import ( // Tests that block headers are identical across nodes where present. func TestBlock_Header(t *testing.T) { - blocks := fetchBlockChain(t) - testNode(t, func(t *testing.T, node e2e.Node) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -50,7 +54,7 @@ func TestBlock_Header(t *testing.T) { // Tests that the node contains the expected block range. func TestBlock_Range(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index a645fd7c14..7b3d829d1c 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -3,6 +3,7 @@ package e2e_test import ( "context" "os" + "sort" "sync" "testing" @@ -22,7 +23,6 @@ func init() { } var ( - ctx = context.Background() testnetCache = map[string]e2e.Testnet{} testnetCacheMtx = sync.Mutex{} blocksCache = map[string][]*types.Block{} @@ -37,7 +37,7 @@ var ( // these tests are skipped so that they're not picked up during normal unit // test runs. 
If E2E_NODE is also set, only the specified node is tested, // otherwise all nodes are tested. -func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { +func testNode(t *testing.T, testFunc func(context.Context, *testing.T, e2e.Node)) { t.Helper() testnet := loadTestnet(t) @@ -47,6 +47,10 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { node := testnet.LookupNode(name) require.NotNil(t, node, "node %q not found in testnet %q", name, testnet.Name) nodes = []*e2e.Node{node} + } else { + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].Name < nodes[j].Name + }) } for _, node := range nodes { @@ -57,8 +61,10 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { } t.Run(node.Name, func(t *testing.T) { - t.Parallel() - testFunc(t, node) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testFunc(ctx, t, node) }) } } @@ -86,7 +92,7 @@ func loadTestnet(t *testing.T) e2e.Testnet { // fetchBlockChain fetches a complete, up-to-date block history from // the freshest testnet archive node. 
-func fetchBlockChain(t *testing.T) []*types.Block { +func fetchBlockChain(ctx context.Context, t *testing.T) []*types.Block { t.Helper() testnet := loadTestnet(t) diff --git a/test/e2e/tests/evidence_test.go b/test/e2e/tests/evidence_test.go index f7f2ede790..9d0a1d255b 100644 --- a/test/e2e/tests/evidence_test.go +++ b/test/e2e/tests/evidence_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -9,7 +10,10 @@ import ( // assert that all nodes that have blocks at the height of a misbehavior has evidence // for that misbehavior func TestEvidence_Misbehavior(t *testing.T) { - blocks := fetchBlockChain(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) testnet := loadTestnet(t) seenEvidence := 0 for _, block := range blocks { diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index e6ff27a0e3..71a9584122 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ func TestNet_Peers(t *testing.T) { // FIXME Skip test since nodes aren't always able to fully mesh t.SkipNow() - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) netInfo, err := client.NetInfo(ctx) diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 8292e86ee3..6d8b4c64de 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "bytes" + "context" "testing" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( // Tests that validator sets are available and correct according to // scheduled validator updates. 
func TestValidator_Sets(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -36,7 +37,7 @@ func TestValidator_Sets(t *testing.T) { } valSchedule := newValidatorSchedule(*node.Testnet) - valSchedule.Increment(first - node.Testnet.InitialHeight) + require.NoError(t, valSchedule.Increment(first-node.Testnet.InitialHeight)) for h := first; h <= last; h++ { validators := []*types.Validator{} @@ -51,7 +52,7 @@ func TestValidator_Sets(t *testing.T) { } require.Equal(t, valSchedule.Set.Validators, validators, "incorrect validator set at height %v", h) - valSchedule.Increment(1) + require.NoError(t, valSchedule.Increment(1)) } }) } @@ -59,8 +60,11 @@ func TestValidator_Sets(t *testing.T) { // Tests that a validator proposes blocks when it's supposed to. It tolerates some // missed blocks, e.g. due to testnet perturbations. func TestValidator_Propose(t *testing.T) { - blocks := fetchBlockChain(t) - testNode(t, func(t *testing.T, node e2e.Node) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { if node.Mode != e2e.ModeValidator { return } @@ -76,7 +80,7 @@ func TestValidator_Propose(t *testing.T) { proposeCount++ } } - valSchedule.Increment(1) + require.NoError(t, valSchedule.Increment(1)) } require.False(t, proposeCount == 0 && expectCount > 0, @@ -90,8 +94,11 @@ func TestValidator_Propose(t *testing.T) { // Tests that a validator signs blocks when it's supposed to. It tolerates some // missed blocks, e.g. due to testnet perturbations. 
func TestValidator_Sign(t *testing.T) { - blocks := fetchBlockChain(t) - testNode(t, func(t *testing.T, node e2e.Node) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { if node.Mode != e2e.ModeValidator { return } @@ -116,7 +123,7 @@ func TestValidator_Sign(t *testing.T) { } else { require.False(t, signed, "unexpected signature for block %v", block.LastCommit.Height) } - valSchedule.Increment(1) + require.NoError(t, valSchedule.Increment(1)) } require.False(t, signCount == 0 && expectCount > 0, @@ -147,7 +154,7 @@ func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule { } } -func (s *validatorSchedule) Increment(heights int64) { +func (s *validatorSchedule) Increment(heights int64) error { for i := int64(0); i < heights; i++ { s.height++ if s.height > 2 { @@ -155,12 +162,13 @@ func (s *validatorSchedule) Increment(heights int64) { // two blocks after they're returned. 
if update, ok := s.updates[s.height-2]; ok { if err := s.Set.UpdateWithChangeSet(makeVals(update)); err != nil { - panic(err) + return err } } } s.Set.IncrementProposerPriority(1) } + return nil } func makeVals(valMap map[*e2e.Node]int64) []*types.Validator { diff --git a/test/fuzz/Makefile b/test/fuzz/Makefile index 3d34e0a43b..3bf4486b8c 100644 --- a/test/fuzz/Makefile +++ b/test/fuzz/Makefile @@ -1,38 +1,15 @@ #!/usr/bin/make -f -.PHONY: fuzz-mempool-v1 -fuzz-mempool-v1: - cd mempool/v1 && \ +.PHONY: fuzz-mempool +fuzz-mempool: + cd mempool && \ rm -f *-fuzz.zip && \ go-fuzz-build && \ go-fuzz -.PHONY: fuzz-mempool-v0 -fuzz-mempool-v0: - cd mempool/v0 && \ - rm -f *-fuzz.zip && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-addrbook -fuzz-p2p-addrbook: - cd p2p/addrbook && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-pex -fuzz-p2p-pex: - cd p2p/pex && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - .PHONY: fuzz-p2p-sc fuzz-p2p-sc: - cd p2p/secret_connection && \ + cd p2p/secretconnection && \ rm -f *-fuzz.zip && \ go run ./init-corpus/main.go && \ go-fuzz-build && \ diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/checktx.go similarity index 53% rename from test/fuzz/mempool/v1/checktx.go rename to test/fuzz/mempool/checktx.go index 2ed0b97ff5..ba60d72ccc 100644 --- a/test/fuzz/mempool/v1/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -7,16 +7,17 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/libs/log" ) -var mp mempool.Mempool +var mp *mempool.TxMempool +var getMp func() mempool.Mempool func init() { app := kvstore.NewApplication() cc := 
abciclient.NewLocalCreator(app) - appConnMem, _ := cc() - err := appConnMem.Start() + appConnMem, _ := cc(log.NewNopLogger()) + err := appConnMem.Start(context.TODO()) if err != nil { panic(err) } @@ -24,11 +25,22 @@ func init() { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mp = mempoolv1.NewCListMempool(cfg, appConnMem, 0) + getMp = func() mempool.Mempool { + if mp == nil { + mp = mempool.NewTxMempool( + log.NewNopLogger(), + cfg, + appConnMem, + 0, + ) + + } + return mp + } } func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) + err := getMp().CheckTx(context.Background(), data, nil, mempool.TxInfo{}) if err != nil { return 0 } diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/fuzz_test.go similarity index 78% rename from test/fuzz/mempool/v0/fuzz_test.go rename to test/fuzz/mempool/fuzz_test.go index 4f8f1e9c8e..8af0326ddd 100644 --- a/test/fuzz/mempool/v0/fuzz_test.go +++ b/test/fuzz/mempool/fuzz_test.go @@ -1,13 +1,13 @@ -package v0_test +package mempool_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" - mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0" + mempool "github.com/tendermint/tendermint/test/fuzz/mempool" ) const testdataCasesDir = "testdata/cases" @@ -25,9 +25,9 @@ func TestMempoolTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) - mempoolv0.Fuzz(input) + mempool.Fuzz(input) }) } } diff --git a/test/fuzz/mempool/v0/testdata/cases/empty b/test/fuzz/mempool/testdata/cases/empty similarity index 100% rename from test/fuzz/mempool/v0/testdata/cases/empty rename to test/fuzz/mempool/testdata/cases/empty diff --git a/test/fuzz/mempool/v0/checktx.go b/test/fuzz/mempool/v0/checktx.go deleted file mode 100644 index 62eda97295..0000000000 --- 
a/test/fuzz/mempool/v0/checktx.go +++ /dev/null @@ -1,37 +0,0 @@ -package v0 - -import ( - "context" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" -) - -var mp mempool.Mempool - -func init() { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - appConnMem, _ := cc() - err := appConnMem.Start() - if err != nil { - panic(err) - } - - cfg := config.DefaultMempoolConfig() - cfg.Broadcast = false - - mp = mempoolv0.NewCListMempool(cfg, appConnMem, 0) -} - -func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) - if err != nil { - return 0 - } - - return 1 -} diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/v1/fuzz_test.go deleted file mode 100644 index 863697a0af..0000000000 --- a/test/fuzz/mempool/v1/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1" -) - -const testdataCasesDir = "testdata/cases" - -func TestMempoolTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - mempoolv1.Fuzz(input) - }) - } -} diff --git a/test/fuzz/mempool/v1/testdata/cases/empty b/test/fuzz/mempool/v1/testdata/cases/empty deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/test/fuzz/p2p/secretconnection/fuzz_test.go b/test/fuzz/p2p/secretconnection/fuzz_test.go index d48dc42670..1f3757aa0d 100644 --- a/test/fuzz/p2p/secretconnection/fuzz_test.go +++ b/test/fuzz/p2p/secretconnection/fuzz_test.go @@ -1,7 +1,7 @@ package secretconnection_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestSecretConnectionTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) secretconnection.Fuzz(input) }) diff --git a/test/fuzz/p2p/secretconnection/init-corpus/main.go b/test/fuzz/p2p/secretconnection/init-corpus/main.go index 635f2d99f9..3a2537ff78 100644 --- a/test/fuzz/p2p/secretconnection/init-corpus/main.go +++ b/test/fuzz/p2p/secretconnection/init-corpus/main.go @@ -4,7 +4,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "log" "os" "path/filepath" @@ -39,7 +38,7 @@ func initCorpus(baseDir string) { for i, datum := range data { filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i)) - if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil { + if err := os.WriteFile(filename, []byte(datum), 0644); err != nil { log.Fatalf("can't write %v to %q: %v", datum, filename, err) } diff --git a/test/fuzz/p2p/secretconnection/read_write.go b/test/fuzz/p2p/secretconnection/read_write.go index 9701460f50..87d547e55c 100644 --- a/test/fuzz/p2p/secretconnection/read_write.go +++ b/test/fuzz/p2p/secretconnection/read_write.go @@ -7,8 +7,8 @@ import ( "log" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/internal/libs/async" sc "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/async" ) func Fuzz(data []byte) int { diff --git a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go index 50b9194fec..41911e725c 100644 --- 
a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go +++ b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go @@ -1,7 +1,7 @@ package server_test import ( - "io/ioutil" + "io" "os" "path/filepath" "testing" @@ -25,7 +25,7 @@ func TestServerTestdataCases(t *testing.T) { }() f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) require.NoError(t, err) - input, err := ioutil.ReadAll(f) + input, err := io.ReadAll(f) require.NoError(t, err) server.Fuzz(input) }) diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go index 08f7e2b6b8..92b02329f9 100644 --- a/test/fuzz/rpc/jsonrpc/server/handler.go +++ b/test/fuzz/rpc/jsonrpc/server/handler.go @@ -2,24 +2,27 @@ package server import ( "bytes" + "context" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "github.com/tendermint/tendermint/libs/log" rs "github.com/tendermint/tendermint/rpc/jsonrpc/server" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var rpcFuncMap = map[string]*rs.RPCFunc{ - "c": rs.NewRPCFunc(func(s string, i int) (string, int) { return "foo", 200 }, "s,i", false), + "c": rs.NewRPCFunc(func(ctx context.Context, s string, i int) (string, error) { + return "foo", nil + }, "s", "i"), } var mux *http.ServeMux func init() { mux = http.NewServeMux() - lgr := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + lgr := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) rs.RegisterRPCFuncs(mux, rpcFuncMap, lgr) } @@ -32,7 +35,7 @@ func Fuzz(data []byte) int { rec := httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { panic(err) } diff --git a/third_party/proto/gogoproto/gogo.proto b/third_party/proto/gogoproto/gogo.proto index 27960ecfbe..31c516cd03 100644 --- a/third_party/proto/gogoproto/gogo.proto +++ b/third_party/proto/gogoproto/gogo.proto @@ -144,4 
+144,4 @@ extend google.protobuf.FieldOptions { optional bool wktpointer = 65012; optional string castrepeated = 65013; -} \ No newline at end of file +} diff --git a/tools/proto/Dockerfile b/tools/proto/Dockerfile deleted file mode 100644 index 5008226904..0000000000 --- a/tools/proto/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM bufbuild/buf:latest as buf - -FROM golang:1.14-alpine3.11 as builder - -RUN apk add --update --no-cache build-base curl git upx && \ - rm -rf /var/cache/apk/* - -ENV GOLANG_PROTOBUF_VERSION=1.3.1 \ - GOGO_PROTOBUF_VERSION=1.3.2 - -RUN GO111MODULE=on go get \ - github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogo@v${GOGO_PROTOBUF_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogofaster@v${GOGO_PROTOBUF_VERSION} && \ - mv /go/bin/protoc-gen-go* /usr/local/bin/ - - -FROM alpine:edge - -WORKDIR /work - -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories && \ - apk add --update --no-cache clang && \ - rm -rf /var/cache/apk/* - -COPY --from=builder /usr/local/bin /usr/local/bin -COPY --from=buf /usr/local/bin /usr/local/bin diff --git a/tools/tm-signer-harness/Dockerfile b/tools/tm-signer-harness/Dockerfile deleted file mode 100644 index 83f57a3d52..0000000000 --- a/tools/tm-signer-harness/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -ARG TENDERMINT_VERSION=latest -FROM tendermint/tendermint:${TENDERMINT_VERSION} - -COPY tm-signer-harness /usr/bin/tm-signer-harness diff --git a/tools/tm-signer-harness/Makefile b/tools/tm-signer-harness/Makefile deleted file mode 100644 index fc41571088..0000000000 --- a/tools/tm-signer-harness/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -.PHONY: build install docker-image - -TENDERMINT_VERSION?=latest -BUILD_TAGS?='tendermint' -VERSION := $(shell git describe --always) -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)" - -.DEFAULT_GOAL := build - -build: - CGO_ENABLED=0 go 
build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o ../../build/tm-signer-harness main.go - -install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) . - -docker-image: - GOOS=linux GOARCH=amd64 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o tm-signer-harness main.go - docker build \ - --build-arg TENDERMINT_VERSION=$(TENDERMINT_VERSION) \ - -t tendermint/tm-signer-harness:$(TENDERMINT_VERSION) . - rm -rf tm-signer-harness diff --git a/tools/tm-signer-harness/README.md b/tools/tm-signer-harness/README.md deleted file mode 100644 index 7add3a9977..0000000000 --- a/tools/tm-signer-harness/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# tm-signer-harness - -See the [`tm-signer-harness` -documentation](https://tendermint.com/docs/tools/remote-signer-validation.html) -for more details. diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go deleted file mode 100644 index 96eaaaff04..0000000000 --- a/tools/tm-signer-harness/internal/test_harness.go +++ /dev/null @@ -1,427 +0,0 @@ -package internal - -import ( - "bytes" - "context" - "fmt" - "net" - "os" - "os/signal" - "time" - - "github.com/tendermint/tendermint/crypto/tmhash" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/privval" - - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmos "github.com/tendermint/tendermint/libs/os" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" -) - -// Test harness error codes (which act as exit codes when the test harness fails). 
-const ( - NoError int = iota // 0 - ErrInvalidParameters // 1 - ErrMaxAcceptRetriesReached // 2 - ErrFailedToLoadGenesisFile // 3 - ErrFailedToCreateListener // 4 - ErrFailedToStartListener // 5 - ErrInterrupted // 6 - ErrOther // 7 - ErrTestPublicKeyFailed // 8 - ErrTestSignProposalFailed // 9 - ErrTestSignVoteFailed // 10 -) - -var voteTypes = []tmproto.SignedMsgType{tmproto.PrevoteType, tmproto.PrecommitType} - -// TestHarnessError allows us to keep track of which exit code should be used -// when exiting the main program. -type TestHarnessError struct { - Code int // The exit code to return - Err error // The original error - Info string // Any additional information -} - -var _ error = (*TestHarnessError)(nil) - -// TestHarness allows for testing of a remote signer to ensure compatibility -// with this version of Tendermint. -type TestHarness struct { - addr string - signerClient *privval.SignerClient - fpv *privval.FilePV - chainID string - acceptRetries int - logger log.Logger - exitWhenComplete bool - exitCode int -} - -// TestHarnessConfig provides configuration to set up a remote signer test -// harness. -type TestHarnessConfig struct { - BindAddr string - - KeyFile string - StateFile string - GenesisFile string - - AcceptDeadline time.Duration - ConnDeadline time.Duration - AcceptRetries int - - SecretConnKey ed25519.PrivKey - - ExitWhenComplete bool // Whether or not to call os.Exit when the harness has completed. -} - -// timeoutError can be used to check if an error returned from the netp package -// was due to a timeout. -type timeoutError interface { - Timeout() bool -} - -// NewTestHarness will load Tendermint data from the given files (including -// validator public/private keypairs and chain details) and create a new -// harness. 
-func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) { - keyFile := ExpandPath(cfg.KeyFile) - stateFile := ExpandPath(cfg.StateFile) - logger.Info("Loading private validator configuration", "keyFile", keyFile, "stateFile", stateFile) - // NOTE: LoadFilePV ultimately calls os.Exit on failure. No error will be - // returned if this call fails. - fpv, err := privval.LoadFilePV(keyFile, stateFile) - if err != nil { - return nil, err - } - - genesisFile := ExpandPath(cfg.GenesisFile) - logger.Info("Loading chain ID from genesis file", "genesisFile", genesisFile) - st, err := state.MakeGenesisDocFromFile(genesisFile) - if err != nil { - return nil, newTestHarnessError(ErrFailedToLoadGenesisFile, err, genesisFile) - } - logger.Info("Loaded genesis file", "chainID", st.ChainID) - - spv, err := newTestHarnessListener(logger, cfg) - if err != nil { - return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") - } - - signerClient, err := privval.NewSignerClient(spv, st.ChainID) - if err != nil { - return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") - } - - return &TestHarness{ - addr: cfg.BindAddr, - signerClient: signerClient, - fpv: fpv, - chainID: st.ChainID, - acceptRetries: cfg.AcceptRetries, - logger: logger, - exitWhenComplete: cfg.ExitWhenComplete, - exitCode: 0, - }, nil -} - -// Run will execute the tests associated with this test harness. The intention -// here is to call this from one's `main` function, as the way it succeeds or -// fails at present is to call os.Exit() with an exit code related to the error -// that caused the tests to fail, or exit code 0 on success. 
-func (th *TestHarness) Run() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - go func() { - for sig := range c { - th.logger.Info("Caught interrupt, terminating...", "sig", sig) - th.Shutdown(newTestHarnessError(ErrInterrupted, nil, "")) - } - }() - - th.logger.Info("Starting test harness") - accepted := false - var startErr error - - for acceptRetries := th.acceptRetries; acceptRetries > 0; acceptRetries-- { - th.logger.Info("Attempting to accept incoming connection", "acceptRetries", acceptRetries) - - if err := th.signerClient.WaitForConnection(10 * time.Millisecond); err != nil { - // if it wasn't a timeout error - if _, ok := err.(timeoutError); !ok { - th.logger.Error("Failed to start listener", "err", err) - th.Shutdown(newTestHarnessError(ErrFailedToStartListener, err, "")) - // we need the return statements in case this is being run - // from a unit test - otherwise this function will just die - // when os.Exit is called - return - } - startErr = err - } else { - th.logger.Info("Accepted external connection") - accepted = true - break - } - } - if !accepted { - th.logger.Error("Maximum accept retries reached", "acceptRetries", th.acceptRetries) - th.Shutdown(newTestHarnessError(ErrMaxAcceptRetriesReached, startErr, "")) - return - } - - // Run the tests - if err := th.TestPublicKey(); err != nil { - th.Shutdown(err) - return - } - if err := th.TestSignProposal(); err != nil { - th.Shutdown(err) - return - } - if err := th.TestSignVote(); err != nil { - th.Shutdown(err) - return - } - th.logger.Info("SUCCESS! All tests passed.") - th.Shutdown(nil) -} - -// TestPublicKey just validates that we can (1) fetch the public key from the -// remote signer, and (2) it matches the public key we've configured for our -// local Tendermint version. 
-func (th *TestHarness) TestPublicKey() error { - th.logger.Info("TEST: Public key of remote signer") - fpvk, err := th.fpv.GetPubKey(context.Background()) - if err != nil { - return err - } - th.logger.Info("Local", "pubKey", fpvk) - sck, err := th.signerClient.GetPubKey(context.Background()) - if err != nil { - return err - } - th.logger.Info("Remote", "pubKey", sck) - if !bytes.Equal(fpvk.Bytes(), sck.Bytes()) { - th.logger.Error("FAILED: Local and remote public keys do not match") - return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") - } - return nil -} - -// TestSignProposal makes sure the remote signer can successfully sign -// proposals. -func (th *TestHarness) TestSignProposal() error { - th.logger.Info("TEST: Signing of proposals") - // sha256 hash of "hash" - hash := tmhash.Sum([]byte("hash")) - prop := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 100, - Round: 0, - POLRound: -1, - BlockID: types.BlockID{ - Hash: hash, - PartSetHeader: types.PartSetHeader{ - Hash: hash, - Total: 1000000, - }, - }, - Timestamp: time.Now(), - } - p := prop.ToProto() - propBytes := types.ProposalSignBytes(th.chainID, p) - if err := th.signerClient.SignProposal(context.Background(), th.chainID, p); err != nil { - th.logger.Error("FAILED: Signing of proposal", "err", err) - return newTestHarnessError(ErrTestSignProposalFailed, err, "") - } - prop.Signature = p.Signature - th.logger.Debug("Signed proposal", "prop", prop) - // first check that it's a basically valid proposal - if err := prop.ValidateBasic(); err != nil { - th.logger.Error("FAILED: Signed proposal is invalid", "err", err) - return newTestHarnessError(ErrTestSignProposalFailed, err, "") - } - sck, err := th.signerClient.GetPubKey(context.Background()) - if err != nil { - return err - } - // now validate the signature on the proposal - if sck.VerifySignature(propBytes, prop.Signature) { - th.logger.Info("Successfully validated proposal signature") - } else { - th.logger.Error("FAILED: Proposal 
signature validation failed") - return newTestHarnessError(ErrTestSignProposalFailed, nil, "signature validation failed") - } - return nil -} - -// TestSignVote makes sure the remote signer can successfully sign all kinds of -// votes. -func (th *TestHarness) TestSignVote() error { - th.logger.Info("TEST: Signing of votes") - for _, voteType := range voteTypes { - th.logger.Info("Testing vote type", "type", voteType) - hash := tmhash.Sum([]byte("hash")) - vote := &types.Vote{ - Type: voteType, - Height: 101, - Round: 0, - BlockID: types.BlockID{ - Hash: hash, - PartSetHeader: types.PartSetHeader{ - Hash: hash, - Total: 1000000, - }, - }, - ValidatorIndex: 0, - ValidatorAddress: tmhash.SumTruncated([]byte("addr")), - Timestamp: time.Now(), - } - v := vote.ToProto() - voteBytes := types.VoteSignBytes(th.chainID, v) - // sign the vote - if err := th.signerClient.SignVote(context.Background(), th.chainID, v); err != nil { - th.logger.Error("FAILED: Signing of vote", "err", err) - return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) - } - vote.Signature = v.Signature - th.logger.Debug("Signed vote", "vote", vote) - // validate the contents of the vote - if err := vote.ValidateBasic(); err != nil { - th.logger.Error("FAILED: Signed vote is invalid", "err", err) - return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) - } - sck, err := th.signerClient.GetPubKey(context.Background()) - if err != nil { - return err - } - - // now validate the signature on the proposal - if sck.VerifySignature(voteBytes, vote.Signature) { - th.logger.Info("Successfully validated vote signature", "type", voteType) - } else { - th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) - return newTestHarnessError(ErrTestSignVoteFailed, nil, "signature validation failed") - } - } - return nil -} - -// Shutdown will kill the test harness and attempt to close all open sockets -// gracefully. 
If the supplied error is nil, it is assumed that the exit code -// should be 0. If err is not nil, it will exit with an exit code related to the -// error. -func (th *TestHarness) Shutdown(err error) { - var exitCode int - - if err == nil { - exitCode = NoError - } else if therr, ok := err.(*TestHarnessError); ok { - exitCode = therr.Code - } else { - exitCode = ErrOther - } - th.exitCode = exitCode - - // in case sc.Stop() takes too long - if th.exitWhenComplete { - go func() { - time.Sleep(time.Duration(5) * time.Second) - th.logger.Error("Forcibly exiting program after timeout") - os.Exit(exitCode) - }() - } - - err = th.signerClient.Close() - if err != nil { - th.logger.Error("Failed to cleanly stop listener: %s", err.Error()) - } - - if th.exitWhenComplete { - os.Exit(exitCode) - } -} - -// newTestHarnessListener creates our client instance which we will use for testing. -func newTestHarnessListener(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerListenerEndpoint, error) { - proto, addr := tmnet.ProtocolAndAddress(cfg.BindAddr) - if proto == "unix" { - // make sure the socket doesn't exist - if so, try to delete it - if tmos.FileExists(addr) { - if err := os.Remove(addr); err != nil { - logger.Error("Failed to remove existing Unix domain socket", "addr", addr) - return nil, err - } - } - } - ln, err := net.Listen(proto, addr) - if err != nil { - return nil, err - } - logger.Info("Listening", "proto", proto, "addr", addr) - var svln net.Listener - switch proto { - case "unix": - unixLn := privval.NewUnixListener(ln) - privval.UnixListenerTimeoutAccept(cfg.AcceptDeadline)(unixLn) - privval.UnixListenerTimeoutReadWrite(cfg.ConnDeadline)(unixLn) - svln = unixLn - case "tcp": - tcpLn := privval.NewTCPListener(ln, cfg.SecretConnKey) - privval.TCPListenerTimeoutAccept(cfg.AcceptDeadline)(tcpLn) - privval.TCPListenerTimeoutReadWrite(cfg.ConnDeadline)(tcpLn) - logger.Info("Resolved TCP address for listener", "addr", tcpLn.Addr()) - svln = tcpLn - default: - _ 
= ln.Close() - logger.Error("Unsupported protocol (must be unix:// or tcp://)", "proto", proto) - return nil, newTestHarnessError(ErrInvalidParameters, nil, fmt.Sprintf("Unsupported protocol: %s", proto)) - } - return privval.NewSignerListenerEndpoint(logger, svln), nil -} - -func newTestHarnessError(code int, err error, info string) *TestHarnessError { - return &TestHarnessError{ - Code: code, - Err: err, - Info: info, - } -} - -func (e *TestHarnessError) Error() string { - var msg string - switch e.Code { - case ErrInvalidParameters: - msg = "Invalid parameters supplied to application" - case ErrMaxAcceptRetriesReached: - msg = "Maximum accept retries reached" - case ErrFailedToLoadGenesisFile: - msg = "Failed to load genesis file" - case ErrFailedToCreateListener: - msg = "Failed to create listener" - case ErrFailedToStartListener: - msg = "Failed to start listener" - case ErrInterrupted: - msg = "Interrupted" - case ErrTestPublicKeyFailed: - msg = "Public key validation test failed" - case ErrTestSignProposalFailed: - msg = "Proposal signing validation test failed" - case ErrTestSignVoteFailed: - msg = "Vote signing validation test failed" - default: - msg = "Unknown error" - } - if len(e.Info) > 0 { - msg = fmt.Sprintf("%s: %s", msg, e.Info) - } - if e.Err != nil { - msg = fmt.Sprintf("%s (original error: %s)", msg, e.Err.Error()) - } - return msg -} diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go deleted file mode 100644 index cf22bc8362..0000000000 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package internal - -import ( - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/privval" - 
"github.com/tendermint/tendermint/types" -) - -const ( - keyFileContents = `{ - "address": "D08FCA3BA74CF17CBFC15E64F9505302BB0E2748", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "ZCsuTjaczEyon70nmKxwvwu+jqrbq5OH3yQjcK0SFxc=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "8O39AkQsoe1sBQwud/Kdul8lg8K9SFsql9aZvwXQSt1kKy5ONpzMTKifvSeYrHC/C76Oqturk4ffJCNwrRIXFw==" - } -}` - - stateFileContents = `{ - "height": "0", - "round": 0, - "step": 0 -}` - - genesisFileContents = `{ - "genesis_time": "2019-01-15T11:56:34.8963Z", - "chain_id": "test-chain-0XwP5E", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_num": 50 - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - } - }, - "validators": [ - { - "address": "D08FCA3BA74CF17CBFC15E64F9505302BB0E2748", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "ZCsuTjaczEyon70nmKxwvwu+jqrbq5OH3yQjcK0SFxc=" - }, - "power": "10", - "name": "" - } - ], - "app_hash": "" -}` - - defaultConnDeadline = 100 -) - -func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) { - cfg := makeConfig(t, 1, 2) - defer cleanup(cfg) - - th, err := NewTestHarness(log.TestingLogger(), cfg) - require.NoError(t, err) - th.Run() - assert.Equal(t, ErrMaxAcceptRetriesReached, th.exitCode) -} - -func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, false) - }, - NoError, - ) -} - -func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - return newMockSignerServer(t, th, ed25519.GenPrivKey(), false, false) - }, - ErrTestPublicKeyFailed, - ) -} - -func TestRemoteSignerProposalSigningFailed(t *testing.T) { - harnessTest( 
- t, - func(th *TestHarness) *privval.SignerServer { - return newMockSignerServer(t, th, th.fpv.Key.PrivKey, true, false) - }, - ErrTestSignProposalFailed, - ) -} - -func TestRemoteSignerVoteSigningFailed(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - return newMockSignerServer(t, th, th.fpv.Key.PrivKey, false, true) - }, - ErrTestSignVoteFailed, - ) -} - -func newMockSignerServer( - t *testing.T, - th *TestHarness, - privKey crypto.PrivKey, - breakProposalSigning bool, - breakVoteSigning bool, -) *privval.SignerServer { - mockPV := types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning) - - dialerEndpoint := privval.NewSignerDialerEndpoint( - th.logger, - privval.DialTCPFn( - th.addr, - time.Duration(defaultConnDeadline)*time.Millisecond, - ed25519.GenPrivKey(), - ), - ) - - return privval.NewSignerServer(dialerEndpoint, th.chainID, mockPV) -} - -// For running relatively standard tests. -func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.SignerServer, expectedExitCode int) { - cfg := makeConfig(t, 100, 3) - defer cleanup(cfg) - - th, err := NewTestHarness(log.TestingLogger(), cfg) - require.NoError(t, err) - donec := make(chan struct{}) - go func() { - defer close(donec) - th.Run() - }() - - ss := signerServerMaker(th) - require.NoError(t, ss.Start()) - assert.True(t, ss.IsRunning()) - defer ss.Stop() //nolint:errcheck // ignore for tests - - <-donec - assert.Equal(t, expectedExitCode, th.exitCode) -} - -func makeConfig(t *testing.T, acceptDeadline, acceptRetries int) TestHarnessConfig { - return TestHarnessConfig{ - BindAddr: privval.GetFreeLocalhostAddrPort(), - KeyFile: makeTempFile("tm-testharness-keyfile", keyFileContents), - StateFile: makeTempFile("tm-testharness-statefile", stateFileContents), - GenesisFile: makeTempFile("tm-testharness-genesisfile", genesisFileContents), - AcceptDeadline: time.Duration(acceptDeadline) * time.Millisecond, - ConnDeadline: 
time.Duration(defaultConnDeadline) * time.Millisecond, - AcceptRetries: acceptRetries, - SecretConnKey: ed25519.GenPrivKey(), - ExitWhenComplete: false, - } -} - -func cleanup(cfg TestHarnessConfig) { - os.Remove(cfg.KeyFile) - os.Remove(cfg.StateFile) - os.Remove(cfg.GenesisFile) -} - -func makeTempFile(name, content string) string { - tempFile, err := ioutil.TempFile("", fmt.Sprintf("%s-*", name)) - if err != nil { - panic(err) - } - if _, err := tempFile.Write([]byte(content)); err != nil { - tempFile.Close() - panic(err) - } - if err := tempFile.Close(); err != nil { - panic(err) - } - return tempFile.Name() -} diff --git a/tools/tm-signer-harness/internal/utils.go b/tools/tm-signer-harness/internal/utils.go deleted file mode 100644 index 9783ca95b3..0000000000 --- a/tools/tm-signer-harness/internal/utils.go +++ /dev/null @@ -1,25 +0,0 @@ -package internal - -import ( - "os/user" - "path/filepath" - "strings" -) - -// ExpandPath will check if the given path begins with a "~" symbol, and if so, -// will expand it to become the user's home directory. If it fails to expand the -// path it will automatically return the original path itself. 
-func ExpandPath(path string) string { - usr, err := user.Current() - if err != nil { - return path - } - - if path == "~" { - return usr.HomeDir - } else if strings.HasPrefix(path, "~/") { - return filepath.Join(usr.HomeDir, path[2:]) - } - - return path -} diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go deleted file mode 100644 index a6d1312a17..0000000000 --- a/tools/tm-signer-harness/main.go +++ /dev/null @@ -1,200 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/tools/tm-signer-harness/internal" - "github.com/tendermint/tendermint/version" -) - -const ( - defaultAcceptRetries = 100 - defaultBindAddr = "tcp://127.0.0.1:0" - defaultAcceptDeadline = 1 - defaultConnDeadline = 3 - defaultExtractKeyOutput = "./signing.key" -) - -var defaultTMHome string - -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - -// Command line flags -var ( - flagAcceptRetries int - flagBindAddr string - flagTMHome string - flagKeyOutputPath string -) - -// Command line commands -var ( - rootCmd *flag.FlagSet - runCmd *flag.FlagSet - extractKeyCmd *flag.FlagSet - versionCmd *flag.FlagSet -) - -func init() { - rootCmd = flag.NewFlagSet("root", flag.ExitOnError) - rootCmd.Usage = func() { - fmt.Println(`Remote signer test harness for Tendermint. 
- -Usage: - tm-signer-harness [flags] - -Available Commands: - extract_key Extracts a signing key from a local Tendermint instance - help Help on the available commands - run Runs the test harness - version Display version information and exit - -Use "tm-signer-harness help " for more information about that command.`) - fmt.Println("") - } - - hd, err := os.UserHomeDir() - if err != nil { - fmt.Println("The UserHomeDir is not defined, setting the default TM Home PATH to \"~/.tendermint\"") - defaultTMHome = "~/.tendermint" - } else { - defaultTMHome = fmt.Sprintf("%s/.tendermint", hd) - } - - runCmd = flag.NewFlagSet("run", flag.ExitOnError) - runCmd.IntVar(&flagAcceptRetries, - "accept-retries", - defaultAcceptRetries, - "The number of attempts to listen for incoming connections") - runCmd.StringVar(&flagBindAddr, "addr", defaultBindAddr, "Bind to this address for the testing") - runCmd.StringVar(&flagTMHome, "tmhome", defaultTMHome, "Path to the Tendermint home directory") - runCmd.Usage = func() { - fmt.Println(`Runs the remote signer test harness for Tendermint. - -Usage: - tm-signer-harness run [flags] - -Flags:`) - runCmd.PrintDefaults() - fmt.Println("") - } - - extractKeyCmd = flag.NewFlagSet("extract_key", flag.ExitOnError) - extractKeyCmd.StringVar(&flagKeyOutputPath, - "output", - defaultExtractKeyOutput, - "Path to which signing key should be written") - extractKeyCmd.StringVar(&flagTMHome, "tmhome", defaultTMHome, "Path to the Tendermint home directory") - extractKeyCmd.Usage = func() { - fmt.Println(`Extracts a signing key from a local Tendermint instance for use in the remote -signer under test. - -Usage: - tm-signer-harness extract_key [flags] - -Flags:`) - extractKeyCmd.PrintDefaults() - fmt.Println("") - } - - versionCmd = flag.NewFlagSet("version", flag.ExitOnError) - versionCmd.Usage = func() { - fmt.Println(` -Prints the Tendermint version for which this remote signer harness was built. 
- -Usage: - tm-signer-harness version`) - fmt.Println("") - } -} - -func runTestHarness(acceptRetries int, bindAddr, tmhome string) { - tmhome = internal.ExpandPath(tmhome) - cfg := internal.TestHarnessConfig{ - BindAddr: bindAddr, - KeyFile: filepath.Join(tmhome, "config", "priv_validator_key.json"), - StateFile: filepath.Join(tmhome, "data", "priv_validator_state.json"), - GenesisFile: filepath.Join(tmhome, "config", "genesis.json"), - AcceptDeadline: time.Duration(defaultAcceptDeadline) * time.Second, - AcceptRetries: acceptRetries, - ConnDeadline: time.Duration(defaultConnDeadline) * time.Second, - SecretConnKey: ed25519.GenPrivKey(), - ExitWhenComplete: true, - } - harness, err := internal.NewTestHarness(logger, cfg) - if err != nil { - logger.Error(err.Error()) - if therr, ok := err.(*internal.TestHarnessError); ok { - os.Exit(therr.Code) - } - os.Exit(internal.ErrOther) - } - harness.Run() -} - -func extractKey(tmhome, outputPath string) { - keyFile := filepath.Join(internal.ExpandPath(tmhome), "config", "priv_validator_key.json") - stateFile := filepath.Join(internal.ExpandPath(tmhome), "data", "priv_validator_state.json") - fpv, err := privval.LoadFilePV(keyFile, stateFile) - if err != nil { - logger.Error("Can't load file pv", "err", err) - os.Exit(1) - } - pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey)) - if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil { - logger.Info("Failed to write private key", "output", outputPath, "err", err) - os.Exit(1) - } - logger.Info("Successfully wrote private key", "output", outputPath) -} - -func main() { - if err := rootCmd.Parse(os.Args[1:]); err != nil { - fmt.Printf("Error parsing flags: %v\n", err) - os.Exit(1) - } - if rootCmd.NArg() == 0 || (rootCmd.NArg() == 1 && rootCmd.Arg(0) == "help") { - rootCmd.Usage() - os.Exit(0) - } - - switch rootCmd.Arg(0) { - case "help": - switch rootCmd.Arg(1) { - case "run": - runCmd.Usage() - case "extract_key": - extractKeyCmd.Usage() - case 
"version": - versionCmd.Usage() - default: - fmt.Printf("Unrecognized command: %s\n", rootCmd.Arg(1)) - os.Exit(1) - } - case "run": - if err := runCmd.Parse(os.Args[2:]); err != nil { - fmt.Printf("Error parsing flags: %v\n", err) - os.Exit(1) - } - runTestHarness(flagAcceptRetries, flagBindAddr, flagTMHome) - case "extract_key": - if err := extractKeyCmd.Parse(os.Args[2:]); err != nil { - fmt.Printf("Error parsing flags: %v\n", err) - os.Exit(1) - } - extractKey(flagTMHome, flagKeyOutputPath) - case "version": - fmt.Println(version.TMVersion) - default: - fmt.Printf("Unrecognized command: %s\n", flag.Arg(0)) - os.Exit(1) - } -} diff --git a/types/block.go b/types/block.go index 1c44125ac0..193ae4305b 100644 --- a/types/block.go +++ b/types/block.go @@ -7,6 +7,7 @@ import ( "math" "sort" "strings" + "sync" "time" "github.com/celestiaorg/nmt/namespace" @@ -17,7 +18,6 @@ import ( "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" @@ -46,7 +46,7 @@ const ( // Block defines the atomic unit of a Tendermint blockchain. type Block struct { - mtx tmsync.Mutex + mtx sync.Mutex Header `json:"header"` Data `json:"data"` @@ -73,7 +73,7 @@ func (b *Block) ValidateBasic() error { return errors.New("nil LastCommit") } if err := b.LastCommit.ValidateBasic(); err != nil { - return fmt.Errorf("wrong LastCommit: %v", err) + return fmt.Errorf("wrong LastCommit: %w", err) } if w, g := b.LastCommit.Hash(), b.LastCommitHash; !bytes.Equal(w, g) { @@ -131,22 +131,22 @@ func (b *Block) Hash() tmbytes.HexBytes { // MakePartSet returns a PartSet containing parts of a serialized block. // This is the form in which the block is gossipped to peers. 
// CONTRACT: partSize is greater than zero. -func (b *Block) MakePartSet(partSize uint32) *PartSet { +func (b *Block) MakePartSet(partSize uint32) (*PartSet, error) { if b == nil { - return nil + return nil, errors.New("nil block") } b.mtx.Lock() defer b.mtx.Unlock() pbb, err := b.ToProto() if err != nil { - panic(err) + return nil, err } bz, err := proto.Marshal(pbb) if err != nil { - panic(err) + return nil, err } - return NewPartSetFromData(bz, partSize) + return NewPartSetFromData(bz, partSize), nil } // HashesTo is a convenience function that checks if a block hashes to the given argument. @@ -344,7 +344,7 @@ type Header struct { // basic block info Version version.Consensus `json:"version"` ChainID string `json:"chain_id"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` Time time.Time `json:"time"` // prev block info @@ -412,15 +412,15 @@ func (h Header) ValidateBasic() error { } if err := ValidateHash(h.LastCommitHash); err != nil { - return fmt.Errorf("wrong LastCommitHash: %v", err) + return fmt.Errorf("wrong LastCommitHash: %w", err) } if err := ValidateHash(h.DataHash); err != nil { - return fmt.Errorf("wrong DataHash: %v", err) + return fmt.Errorf("wrong DataHash: %w", err) } if err := ValidateHash(h.EvidenceHash); err != nil { - return fmt.Errorf("wrong EvidenceHash: %v", err) + return fmt.Errorf("wrong EvidenceHash: %w", err) } if len(h.ProposerAddress) != crypto.AddressSize { @@ -433,17 +433,17 @@ func (h Header) ValidateBasic() error { // Basic validation of hashes related to application data. // Will validate fully against state in state#ValidateBlock. 
if err := ValidateHash(h.ValidatorsHash); err != nil { - return fmt.Errorf("wrong ValidatorsHash: %v", err) + return fmt.Errorf("wrong ValidatorsHash: %w", err) } if err := ValidateHash(h.NextValidatorsHash); err != nil { - return fmt.Errorf("wrong NextValidatorsHash: %v", err) + return fmt.Errorf("wrong NextValidatorsHash: %w", err) } if err := ValidateHash(h.ConsensusHash); err != nil { - return fmt.Errorf("wrong ConsensusHash: %v", err) + return fmt.Errorf("wrong ConsensusHash: %w", err) } // NOTE: AppHash is arbitrary length if err := ValidateHash(h.LastResultsHash); err != nil { - return fmt.Errorf("wrong LastResultsHash: %v", err) + return fmt.Errorf("wrong LastResultsHash: %w", err) } return nil @@ -528,7 +528,8 @@ func (h *Header) StringIndented(indent string) string { indent, h.LastResultsHash, indent, h.EvidenceHash, indent, h.ProposerAddress, - indent, h.Hash()) + indent, h.Hash(), + ) } // ToProto converts Header to protobuf @@ -612,19 +613,21 @@ const ( // CommitSig is a part of the Vote included in a Commit. type CommitSig struct { - BlockIDFlag BlockIDFlag `json:"block_id_flag"` - ValidatorAddress Address `json:"validator_address"` - Timestamp time.Time `json:"timestamp"` - Signature []byte `json:"signature"` + BlockIDFlag BlockIDFlag `json:"block_id_flag"` + ValidatorAddress Address `json:"validator_address"` + Timestamp time.Time `json:"timestamp"` + Signature []byte `json:"signature"` + VoteExtension VoteExtensionToSign `json:"vote_extension"` } // NewCommitSigForBlock returns new CommitSig with BlockIDFlagCommit. -func NewCommitSigForBlock(signature []byte, valAddr Address, ts time.Time) CommitSig { +func NewCommitSigForBlock(signature []byte, valAddr Address, ts time.Time, ext VoteExtensionToSign) CommitSig { return CommitSig{ BlockIDFlag: BlockIDFlagCommit, ValidatorAddress: valAddr, Timestamp: ts, Signature: signature, + VoteExtension: ext, } } @@ -657,12 +660,14 @@ func (cs CommitSig) Absent() bool { // 1. first 6 bytes of signature // 2. 
first 6 bytes of validator address // 3. block ID flag -// 4. timestamp +// 4. first 6 bytes of the vote extension +// 5. timestamp func (cs CommitSig) String() string { - return fmt.Sprintf("CommitSig{%X by %X on %v @ %s}", + return fmt.Sprintf("CommitSig{%X by %X on %v with %X @ %s}", tmbytes.Fingerprint(cs.Signature), tmbytes.Fingerprint(cs.ValidatorAddress), cs.BlockIDFlag, + tmbytes.Fingerprint(cs.VoteExtension.BytesPacked()), CanonicalTime(cs.Timestamp)) } @@ -734,6 +739,7 @@ func (cs *CommitSig) ToProto() *tmproto.CommitSig { ValidatorAddress: cs.ValidatorAddress, Timestamp: cs.Timestamp, Signature: cs.Signature, + VoteExtension: cs.VoteExtension.ToProto(), } } @@ -745,6 +751,7 @@ func (cs *CommitSig) FromProto(csp tmproto.CommitSig) error { cs.ValidatorAddress = csp.ValidatorAddress cs.Timestamp = csp.Timestamp cs.Signature = csp.Signature + cs.VoteExtension = VoteExtensionToSignFromProto(csp.VoteExtension) return cs.ValidateBasic() } @@ -758,7 +765,7 @@ type Commit struct { // ValidatorSet order. // Any peer with a block can gossip signatures by index with a peer without // recalculating the active ValidatorSet. 
- Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` BlockID BlockID `json:"block_id"` Signatures []CommitSig `json:"signatures"` @@ -791,7 +798,7 @@ func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSe } added, err := voteSet.AddVote(commit.GetVote(int32(idx))) if !added || err != nil { - panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) + panic(fmt.Errorf("failed to reconstruct LastCommit: %w", err)) } } return voteSet @@ -811,6 +818,7 @@ func (commit *Commit) GetVote(valIdx int32) *Vote { ValidatorAddress: commitSig.ValidatorAddress, ValidatorIndex: valIdx, Signature: commitSig.Signature, + VoteExtension: commitSig.VoteExtension.ToVoteExtension(), } } @@ -893,7 +901,7 @@ func (commit *Commit) ValidateBasic() error { } if commit.Height >= 1 { - if commit.BlockID.IsZero() { + if commit.BlockID.IsNil() { return errors.New("commit cannot be for nil block") } @@ -1101,6 +1109,15 @@ func (data *Data) ComputeShares() (NamespacedShares, int) { tailShares...), curLen } +// DataFromPreparedProposal parsed the data returned from the app after calling +// the PrepareProposal ABCI method +// TODO: actually implement +func DataFromPreparedProposal(processedData [][]byte) (Data, error) { + return Data{ + Txs: ToTxs(processedData), + }, nil +} + // paddedLen calculates the number of shares needed to make a power of 2 square // given the current number of shares func paddedLen(length int) int { @@ -1454,8 +1471,8 @@ func (blockID BlockID) ValidateBasic() error { return nil } -// IsZero returns true if this is the BlockID of a nil block. -func (blockID BlockID) IsZero() bool { +// IsNil returns true if this is the BlockID of a nil block. 
+func (blockID BlockID) IsNil() bool { return len(blockID.Hash) == 0 && blockID.PartSetHeader.IsZero() } diff --git a/types/block_test.go b/types/block_test.go index 858d9890ce..645279ec1b 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -35,15 +35,19 @@ func TestMain(m *testing.M) { } func TestBlockAddEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) + voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") + require.NoError(t, err) evList := []Evidence{ev} block := MakeBlock(h, txs, evList, nil, nil, commit) @@ -53,17 +57,22 @@ func TestBlockAddEvidence(t *testing.T) { } func TestBlockValidateBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, (*Block)(nil).ValidateBasic()) txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + require.NoError(t, err) - ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") + require.NoError(t, err) evList 
:= []Evidence{ev} testCases := []struct { @@ -123,41 +132,58 @@ func TestBlockHash(t *testing.T) { } func TestBlockMakePartSet(t *testing.T) { - assert.Nil(t, (*Block)(nil).MakePartSet(2)) + bps, err := (*Block)(nil).MakePartSet(2) + assert.Error(t, err) + assert.Nil(t, bps) - partSet := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil, nil, nil).MakePartSet(1024) + partSet, err := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil, nil, nil).MakePartSet(1024) + require.NoError(t, err) assert.NotNil(t, partSet) assert.EqualValues(t, 1, partSet.Total()) } func TestBlockMakePartSetWithEvidence(t *testing.T) { - assert.Nil(t, (*Block)(nil).MakePartSet(2)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bps, err := (*Block)(nil).MakePartSet(2) + assert.Error(t, err) + assert.Nil(t, bps) lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) + voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + require.NoError(t, err) - ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") + require.NoError(t, err) evList := []Evidence{ev} - partSet := MakeBlock(h, []Tx{Tx("Hello World")}, evList, nil, nil, commit).MakePartSet(512) + partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, evList, nil, nil, commit).MakePartSet(512) + require.NoError(t, err) + assert.NotNil(t, partSet) assert.EqualValues(t, 4, partSet.Total()) } func TestBlockHashesTo(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert.False(t, (*Block)(nil).HashesTo(nil)) lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, 
tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) + + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain") + require.NoError(t, err) evList := []Evidence{ev} block := MakeBlock(h, []Tx{Tx("Hello World")}, evList, nil, nil, commit) @@ -219,10 +245,13 @@ func TestNilHeaderHashDoesntCrash(t *testing.T) { } func TestCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) + voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) assert.Equal(t, h-1, commit.Height) @@ -253,7 +282,11 @@ func TestCommitValidateBasic(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.testName, func(t *testing.T) { - com := randCommit(time.Now()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + com := randCommit(ctx, t, time.Now()) + tc.malleateCommit(com) assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) @@ -326,7 +359,7 @@ func TestHeaderHash(t *testing.T) { LastResultsHash: tmhash.Sum([]byte("last_results_hash")), EvidenceHash: tmhash.Sum([]byte("evidence_hash")), ProposerAddress: crypto.AddressHash([]byte("proposer_address")), - }, hexBytesFromString("F740121F553B5418C3EFBD343C2DBFE9E007BB67B0D020A0741374BAB65242A4")}, + }, hexBytesFromString(t, 
"F740121F553B5418C3EFBD343C2DBFE9E007BB67B0D020A0741374BAB65242A4")}, {"nil header yields nil", nil, nil}, {"nil ValidatorsHash yields nil", &Header{ Version: version.Consensus{Block: 1, App: 2}, @@ -430,22 +463,24 @@ func TestMaxHeaderBytes(t *testing.T) { assert.EqualValues(t, MaxHeaderBytes, int64(len(bz))) } -func randCommit(now time.Time) *Commit { +func randCommit(ctx context.Context, t *testing.T, now time.Time) *Commit { + t.Helper() lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, now) - if err != nil { - panic(err) - } + voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, now) + + require.NoError(t, err) + return commit } -func hexBytesFromString(s string) bytes.HexBytes { +func hexBytesFromString(t *testing.T, s string) bytes.HexBytes { + t.Helper() + b, err := hex.DecodeString(s) - if err != nil { - panic(err) - } + require.NoError(t, err) + return bytes.HexBytes(b) } @@ -514,8 +549,12 @@ func TestCommitToVoteSet(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) - commit, err := makeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, h-1, 1, voteSet, vals, time.Now()) + assert.NoError(t, err) chainID := voteSet.ChainID() @@ -545,6 +584,9 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { round = 0 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + type commitVoteTest struct { blockIDs []BlockID numVotes []int // must sum to numValidators @@ -557,12 +599,12 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { 
} for _, tc := range testCases { - voteSet, valSet, vals := randVoteSet(height-1, round, tmproto.PrecommitType, tc.numValidators, 1) + voteSet, valSet, vals := randVoteSet(ctx, t, height-1, round, tmproto.PrecommitType, tc.numValidators, 1) vi := int32(0) for n := range tc.blockIDs { for i := 0; i < tc.numVotes[n]; i++ { - pubKey, err := vals[vi].GetPubKey(context.Background()) + pubKey, err := vals[vi].GetPubKey(ctx) require.NoError(t, err) vote := &Vote{ ValidatorAddress: pubKey.Address(), @@ -574,7 +616,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { Timestamp: tmtime.Now(), } - added, err := signAddVote(vals[vi], vote, voteSet) + added, err := signAddVote(ctx, vals[vi], vote, voteSet) assert.NoError(t, err) assert.True(t, added) @@ -586,7 +628,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { commit := voteSet.MakeCommit() // panics without > 2/3 valid votes assert.NotNil(t, commit) err := valSet.VerifyCommit(voteSet.ChainID(), blockID, height-1, commit) - assert.Nil(t, err) + assert.NoError(t, err) } else { assert.Panics(t, func() { voteSet.MakeCommit() }) } @@ -634,16 +676,20 @@ func TestBlockIDValidateBasic(t *testing.T) { } func TestBlockProtoBuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + h := mrand.Int63() - c1 := randCommit(time.Now()) + c1 := randCommit(ctx, t, time.Now()) b1 := MakeBlock(h, []Tx{Tx([]byte{1})}, []Evidence{}, nil, nil, &Commit{Signatures: []CommitSig{}}) b1.ProposerAddress = tmrand.Bytes(crypto.AddressSize) evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) - evi := NewMockDuplicateVoteEvidence(h, evidenceTime, "block-test-chain") - b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, []Evidence{evi}, nil, nil, c1) + evi, err := NewMockDuplicateVoteEvidence(ctx, h, evidenceTime, "block-test-chain") + require.NoError(t, err) + b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, EvidenceList{evi}, nil, nil, &Commit{Signatures: []CommitSig{}}) b2.ProposerAddress = 
tmrand.Bytes(crypto.AddressSize) - b2.Data.Evidence.ByteSize() + // b2.EvidenceHash = b2.Evidence.Hash() b3 := MakeBlock(h, []Tx{}, []Evidence{}, nil, nil, c1) b3.ProposerAddress = tmrand.Bytes(crypto.AddressSize) @@ -668,8 +714,10 @@ func TestBlockProtoBuf(t *testing.T) { block, err := BlockFromProto(pb) if tc.expPass2 { - require.NoError(t, err, tc.msg) - require.EqualValues(t, tc.b1.Header, block.Header, tc.msg) + block.Data.Evidence.byteSize = tc.b1.Evidence.byteSize + assert.NoError(t, err, tc.msg) + tc.b1.Hash() + assert.EqualValues(t, tc.b1.Header, block.Header, tc.msg) require.EqualValues(t, tc.b1.Data, block.Data, tc.msg) // todo require.EqualValues(t, tc.b1.Evidence.Evidence, block.Evidence.Evidence, tc.msg) require.EqualValues(t, *tc.b1.LastCommit, *block.LastCommit, tc.msg) @@ -710,8 +758,12 @@ func TestDataProtoBuf(t *testing.T) { // TestEvidenceDataProtoBuf ensures parity in converting to and from proto. func TestEvidenceDataProtoBuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const chainID = "mychain" - ev := NewMockDuplicateVoteEvidence(math.MaxInt64, time.Now(), chainID) + ev, err := NewMockDuplicateVoteEvidence(ctx, math.MaxInt64, time.Now(), chainID) + require.NoError(t, err) data := &EvidenceData{Evidence: EvidenceList{ev}} _ = data.ByteSize() testCases := []struct { @@ -825,7 +877,11 @@ func TestBlockIDProtoBuf(t *testing.T) { } func TestSignedHeaderProtoBuf(t *testing.T) { - commit := randCommit(time.Now()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + commit := randCommit(ctx, t, time.Now()) + h := MakeRandHeader() sh := SignedHeader{Header: &h, Commit: commit} diff --git a/types/canonical.go b/types/canonical.go index 01c3d851dd..803b6a3af2 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -21,7 +21,7 @@ func CanonicalizeBlockID(bid tmproto.BlockID) *tmproto.CanonicalBlockID { panic(err) } var cbid *tmproto.CanonicalBlockID - if rbid == nil || 
rbid.IsZero() { + if rbid == nil || rbid.IsNil() { cbid = nil } else { cbid = &tmproto.CanonicalBlockID{ @@ -51,6 +51,15 @@ func CanonicalizeProposal(chainID string, proposal *tmproto.Proposal) tmproto.Ca } } +func GetVoteExtensionToSign(ext *tmproto.VoteExtension) *tmproto.VoteExtensionToSign { + if ext == nil { + return nil + } + return &tmproto.VoteExtensionToSign{ + AppDataToSign: ext.AppDataToSign, + } +} + // CanonicalizeVote transforms the given Vote to a CanonicalVote, which does // not contain ValidatorIndex and ValidatorAddress fields. func CanonicalizeVote(chainID string, vote *tmproto.Vote) tmproto.CanonicalVote { diff --git a/types/event_bus.go b/types/event_bus.go deleted file mode 100644 index dfe3a06644..0000000000 --- a/types/event_bus.go +++ /dev/null @@ -1,326 +0,0 @@ -package types - -import ( - "context" - "fmt" - "strings" - - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/service" -) - -const defaultCapacity = 0 - -type EventBusSubscriber interface { - Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (Subscription, error) - Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error - UnsubscribeAll(ctx context.Context, subscriber string) error - - NumClients() int - NumClientSubscriptions(clientID string) int -} - -type Subscription interface { - ID() string - Out() <-chan tmpubsub.Message - Canceled() <-chan struct{} - Err() error -} - -// EventBus is a common bus for all events going through the system. All calls -// are proxied to underlying pubsub server. All events must be published using -// EventBus to ensure correct data types. -type EventBus struct { - service.BaseService - pubsub *tmpubsub.Server -} - -// NewEventBus returns a new event bus. 
-func NewEventBus() *EventBus { - return NewEventBusWithBufferCapacity(defaultCapacity) -} - -// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity. -func NewEventBusWithBufferCapacity(cap int) *EventBus { - // capacity could be exposed later if needed - pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap)) - b := &EventBus{pubsub: pubsub} - b.BaseService = *service.NewBaseService(nil, "EventBus", b) - return b -} - -func (b *EventBus) SetLogger(l log.Logger) { - b.BaseService.SetLogger(l) - b.pubsub.SetLogger(l.With("module", "pubsub")) -} - -func (b *EventBus) OnStart() error { - return b.pubsub.Start() -} - -func (b *EventBus) OnStop() { - if err := b.pubsub.Stop(); err != nil { - b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) - } -} - -func (b *EventBus) NumClients() int { - return b.pubsub.NumClients() -} - -func (b *EventBus) NumClientSubscriptions(clientID string) int { - return b.pubsub.NumClientSubscriptions(clientID) -} - -func (b *EventBus) Subscribe( - ctx context.Context, - subscriber string, - query tmpubsub.Query, - outCapacity ...int, -) (Subscription, error) { - return b.pubsub.Subscribe(ctx, subscriber, query, outCapacity...) -} - -// This method can be used for a local consensus explorer and synchronous -// testing. Do not use for for public facing / untrusted subscriptions! 
-func (b *EventBus) SubscribeUnbuffered( - ctx context.Context, - subscriber string, - query tmpubsub.Query, -) (Subscription, error) { - return b.pubsub.SubscribeUnbuffered(ctx, subscriber, query) -} - -func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { - return b.pubsub.Unsubscribe(ctx, args) -} - -func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return b.pubsub.UnsubscribeAll(ctx, subscriber) -} - -func (b *EventBus) Publish(eventValue string, eventData TMEventData) error { - // no explicit deadline for publishing events - ctx := context.Background() - - tokens := strings.Split(EventTypeKey, ".") - event := types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: eventValue, - }, - }, - } - - return b.pubsub.PublishWithEvents(ctx, eventData, []types.Event{event}) -} - -func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) - - // add Tendermint-reserved new block event - events = append(events, EventNewBlock) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) 
- - // add Tendermint-reserved new block header event - events = append(events, EventNewBlockHeader) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { - return b.Publish(EventNewEvidenceValue, evidence) -} - -func (b *EventBus) PublishEventVote(data EventDataVote) error { - return b.Publish(EventVoteValue, data) -} - -func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { - return b.Publish(EventValidBlockValue, data) -} - -func (b *EventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { - return b.Publish(EventBlockSyncStatusValue, data) -} - -func (b *EventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error { - return b.Publish(EventStateSyncStatusValue, data) -} - -// PublishEventTx publishes tx event with events from Result. Note it will add -// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys -// will be overwritten. 
-func (b *EventBus) PublishEventTx(data EventDataTx) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := data.Result.Events - - // add Tendermint-reserved events - events = append(events, EventTx) - - tokens := strings.Split(TxHashKey, ".") - events = append(events, types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: fmt.Sprintf("%X", Tx(data.Tx).Hash()), - }, - }, - }) - - tokens = strings.Split(TxHeightKey, ".") - events = append(events, types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: fmt.Sprintf("%d", data.Height), - }, - }, - }) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { - return b.Publish(EventNewRoundStepValue, data) -} - -func (b *EventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { - return b.Publish(EventTimeoutProposeValue, data) -} - -func (b *EventBus) PublishEventTimeoutWait(data EventDataRoundState) error { - return b.Publish(EventTimeoutWaitValue, data) -} - -func (b *EventBus) PublishEventNewRound(data EventDataNewRound) error { - return b.Publish(EventNewRoundValue, data) -} - -func (b *EventBus) PublishEventCompleteProposal(data EventDataCompleteProposal) error { - return b.Publish(EventCompleteProposalValue, data) -} - -func (b *EventBus) PublishEventPolka(data EventDataRoundState) error { - return b.Publish(EventPolkaValue, data) -} - -func (b *EventBus) PublishEventUnlock(data EventDataRoundState) error { - return b.Publish(EventUnlockValue, data) -} - -func (b *EventBus) PublishEventRelock(data EventDataRoundState) error { - return b.Publish(EventRelockValue, data) -} - -func (b *EventBus) PublishEventLock(data EventDataRoundState) error { - return b.Publish(EventLockValue, data) -} - -func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { - 
return b.Publish(EventValidatorSetUpdatesValue, data) -} - -//----------------------------------------------------------------------------- -type NopEventBus struct{} - -func (NopEventBus) Subscribe( - ctx context.Context, - subscriber string, - query tmpubsub.Query, - out chan<- interface{}, -) error { - return nil -} - -func (NopEventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { - return nil -} - -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return nil -} - -func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { - return nil -} - -func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { - return nil -} - -func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { - return nil -} - -func (NopEventBus) PublishEventVote(data EventDataVote) error { - return nil -} - -func (NopEventBus) PublishEventTx(data EventDataTx) error { - return nil -} - -func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventLock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { - return nil -} - -func (NopEventBus) 
PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { - return nil -} - -func (NopEventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error { - return nil -} diff --git a/types/event_bus_test.go b/types/event_bus_test.go deleted file mode 100644 index a42d5c89a0..0000000000 --- a/types/event_bus_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package types - -import ( - "context" - "fmt" - mrand "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestEventBusPublishEventTx(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - tx := Tx("foo") - result := abci.ResponseDeliverTx{ - Data: []byte("bar"), - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - - // PublishEventTx adds 3 composite keys, so the query below should work - query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) - txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-txsSub.Out() - edt := msg.Data().(EventDataTx) - assert.Equal(t, int64(1), edt.Height) - assert.Equal(t, uint32(0), edt.Index) - assert.EqualValues(t, tx, edt.Tx) - assert.Equal(t, result, edt.Result) - close(done) - }() - - err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a transaction after 1 sec.") - } -} - -func 
TestEventBusPublishEventNewBlock(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - block := MakeBlock(0, []Tx{}, []Evidence{}, nil, nil, nil) - blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()} - resultBeginBlock := abci.ResponseBeginBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - resultEndBlock := abci.ResponseEndBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, - }, - } - - // PublishEventNewBlock adds the tm.event compositeKey, so the query below should work - query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" - blocksSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-blocksSub.Out() - edt := msg.Data().(EventDataNewBlock) - assert.Equal(t, block, edt.Block) - assert.Equal(t, blockID, edt.BlockID) - assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) - assert.Equal(t, resultEndBlock, edt.ResultEndBlock) - close(done) - }() - - err = eventBus.PublishEventNewBlock(EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultBeginBlock: resultBeginBlock, - ResultEndBlock: resultEndBlock, - }) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a block after 1 sec.") - } -} - -func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - tx := Tx("foo") - result := abci.ResponseDeliverTx{ - Data: []byte("bar"), - Events: []abci.Event{ - { - Type: "transfer", - 
Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "foo"}, - {Key: "recipient", Value: "bar"}, - {Key: "amount", Value: "5"}, - }, - }, - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "baz"}, - {Key: "recipient", Value: "cat"}, - {Key: "amount", Value: "13"}, - }, - }, - { - Type: "withdraw.rewards", - Attributes: []abci.EventAttribute{ - {Key: "address", Value: "bar"}, - {Key: "source", Value: "iceman"}, - {Key: "amount", Value: "33"}, - }, - }, - }, - } - - testCases := []struct { - query string - expectResults bool - }{ - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='DoesNotExist'", - false, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", - true, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", - true, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", - true, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", - false, - }, - } - - for i, tc := range testCases { - sub, err := eventBus.Subscribe(context.Background(), fmt.Sprintf("client-%d", i), tmquery.MustParse(tc.query)) - require.NoError(t, err) - - done := make(chan struct{}) - - go func() { - select { - case msg := <-sub.Out(): - data := msg.Data().(EventDataTx) - assert.Equal(t, int64(1), data.Height) - assert.Equal(t, uint32(0), data.Index) - assert.EqualValues(t, tx, data.Tx) - assert.Equal(t, result, data.Result) - close(done) - case <-time.After(1 * time.Second): - return - } - }() - - err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) - assert.NoError(t, err) - - select { - case <-done: - if !tc.expectResults { - require.Fail(t, "unexpected transaction result(s) from subscription") - } - case <-time.After(1 * time.Second): - if tc.expectResults { - require.Fail(t, "failed to receive a transaction after 1 second") - } - } - } -} - -func 
TestEventBusPublishEventNewBlockHeader(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - block := MakeBlock(0, []Tx{}, []Evidence{}, nil, nil, nil) - resultBeginBlock := abci.ResponseBeginBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - resultEndBlock := abci.ResponseEndBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, - }, - } - - // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work - query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" - headersSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-headersSub.Out() - edt := msg.Data().(EventDataNewBlockHeader) - assert.Equal(t, block.Header, edt.Header) - assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) - assert.Equal(t, resultEndBlock, edt.ResultEndBlock) - close(done) - }() - - err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{ - Header: block.Header, - ResultBeginBlock: resultBeginBlock, - ResultEndBlock: resultEndBlock, - }) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a block header after 1 sec.") - } -} - -func TestEventBusPublishEventNewEvidence(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - ev := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") - - query := "tm.event='NewEvidence'" - evSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) - require.NoError(t, err) - - done := 
make(chan struct{}) - go func() { - msg := <-evSub.Out() - edt := msg.Data().(EventDataNewEvidence) - assert.Equal(t, ev, edt.Evidence) - assert.Equal(t, int64(4), edt.Height) - close(done) - }() - - err = eventBus.PublishEventNewEvidence(EventDataNewEvidence{ - Evidence: ev, - Height: 4, - }) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a block header after 1 sec.") - } -} - -func TestEventBusPublish(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - const numEventsExpected = 14 - - sub, err := eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, numEventsExpected) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - numEvents := 0 - for range sub.Out() { - numEvents++ - if numEvents >= numEventsExpected { - close(done) - return - } - } - }() - - err = eventBus.Publish(EventNewBlockHeaderValue, EventDataNewBlockHeader{}) - require.NoError(t, err) - err = eventBus.PublishEventNewBlock(EventDataNewBlock{}) - require.NoError(t, err) - err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{}) - require.NoError(t, err) - err = eventBus.PublishEventVote(EventDataVote{}) - require.NoError(t, err) - err = eventBus.PublishEventNewRoundStep(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventTimeoutPropose(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventTimeoutWait(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventNewRound(EventDataNewRound{}) - require.NoError(t, err) - err = eventBus.PublishEventCompleteProposal(EventDataCompleteProposal{}) - require.NoError(t, err) - err = eventBus.PublishEventPolka(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventUnlock(EventDataRoundState{}) - require.NoError(t, err) - err = 
eventBus.PublishEventRelock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventLock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{}) - require.NoError(t, err) - err = eventBus.PublishEventBlockSyncStatus(EventDataBlockSyncStatus{}) - require.NoError(t, err) - err = eventBus.PublishEventStateSyncStatus(EventDataStateSyncStatus{}) - require.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatalf("expected to receive %d events after 1 sec.", numEventsExpected) - } -} - -func BenchmarkEventBus(b *testing.B) { - benchmarks := []struct { - name string - numClients int - randQueries bool - randEvents bool - }{ - {"10Clients1Query1Event", 10, false, false}, - {"100Clients", 100, false, false}, - {"1000Clients", 1000, false, false}, - - {"10ClientsRandQueries1Event", 10, true, false}, - {"100Clients", 100, true, false}, - {"1000Clients", 1000, true, false}, - - {"10ClientsRandQueriesRandEvents", 10, true, true}, - {"100Clients", 100, true, true}, - {"1000Clients", 1000, true, true}, - - {"10Clients1QueryRandEvents", 10, false, true}, - {"100Clients", 100, false, true}, - {"1000Clients", 1000, false, true}, - } - - for _, bm := range benchmarks { - bm := bm - b.Run(bm.name, func(b *testing.B) { - benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) - }) - } -} - -func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { - // for random* functions - mrand.Seed(time.Now().Unix()) - - eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache - err := eventBus.Start() - if err != nil { - b.Error(err) - } - b.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - q := EventQueryNewBlock - - for i := 0; i < numClients; i++ { - if randQueries { - q = randQuery() - } - sub, err := 
eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-sub.Out(): - case <-sub.Canceled(): - return - } - } - }() - } - - eventValue := EventNewBlockValue - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if randEvents { - eventValue = randEventValue() - } - - err := eventBus.Publish(eventValue, EventDataString("Gamora")) - if err != nil { - b.Error(err) - } - } -} - -var events = []string{ - EventNewBlockValue, - EventNewBlockHeaderValue, - EventNewRoundValue, - EventNewRoundStepValue, - EventTimeoutProposeValue, - EventCompleteProposalValue, - EventPolkaValue, - EventUnlockValue, - EventLockValue, - EventRelockValue, - EventTimeoutWaitValue, - EventVoteValue, - EventBlockSyncStatusValue, - EventStateSyncStatusValue, -} - -func randEventValue() string { - - return events[mrand.Intn(len(events))] -} - -var queries = []tmpubsub.Query{ - EventQueryNewBlock, - EventQueryNewBlockHeader, - EventQueryNewRound, - EventQueryNewRoundStep, - EventQueryTimeoutPropose, - EventQueryCompleteProposal, - EventQueryPolka, - EventQueryUnlock, - EventQueryLock, - EventQueryRelock, - EventQueryTimeoutWait, - EventQueryVote, - EventQueryBlockSyncStatus, - EventQueryStateSyncStatus, -} - -func randQuery() tmpubsub.Query { - return queries[mrand.Intn(len(queries))] -} diff --git a/types/events.go b/types/events.go index 46f150abdf..2631760831 100644 --- a/types/events.go +++ b/types/events.go @@ -1,13 +1,13 @@ package types import ( + "context" "fmt" "strings" abci "github.com/tendermint/tendermint/abci/types" - tmjson "github.com/tendermint/tendermint/libs/json" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/jsontypes" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" ) // Reserved event types (alphabetically sorted). 
@@ -38,7 +38,6 @@ const ( EventStateSyncStatusValue = "StateSyncStatus" EventTimeoutProposeValue = "TimeoutPropose" EventTimeoutWaitValue = "TimeoutWait" - EventUnlockValue = "Unlock" EventValidBlockValue = "ValidBlock" EventVoteValue = "Vote" ) @@ -88,24 +87,24 @@ var ( // ENCODING / DECODING -// TMEventData implements events.EventData. -type TMEventData interface { - // empty interface +// EventData is satisfied by types that can be published as event data. +type EventData interface { + jsontypes.Tagged } func init() { - tmjson.RegisterType(EventDataNewBlock{}, "tendermint/event/NewBlock") - tmjson.RegisterType(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader") - tmjson.RegisterType(EventDataNewEvidence{}, "tendermint/event/NewEvidence") - tmjson.RegisterType(EventDataTx{}, "tendermint/event/Tx") - tmjson.RegisterType(EventDataRoundState{}, "tendermint/event/RoundState") - tmjson.RegisterType(EventDataNewRound{}, "tendermint/event/NewRound") - tmjson.RegisterType(EventDataCompleteProposal{}, "tendermint/event/CompleteProposal") - tmjson.RegisterType(EventDataVote{}, "tendermint/event/Vote") - tmjson.RegisterType(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates") - tmjson.RegisterType(EventDataString(""), "tendermint/event/ProposalString") - tmjson.RegisterType(EventDataBlockSyncStatus{}, "tendermint/event/FastSyncStatus") - tmjson.RegisterType(EventDataStateSyncStatus{}, "tendermint/event/StateSyncStatus") + jsontypes.MustRegister(EventDataBlockSyncStatus{}) + jsontypes.MustRegister(EventDataCompleteProposal{}) + jsontypes.MustRegister(EventDataNewBlock{}) + jsontypes.MustRegister(EventDataNewBlockHeader{}) + jsontypes.MustRegister(EventDataNewEvidence{}) + jsontypes.MustRegister(EventDataNewRound{}) + jsontypes.MustRegister(EventDataRoundState{}) + jsontypes.MustRegister(EventDataStateSyncStatus{}) + jsontypes.MustRegister(EventDataTx{}) + jsontypes.MustRegister(EventDataValidatorSetUpdates{}) + 
jsontypes.MustRegister(EventDataVote{}) + jsontypes.MustRegister(EventDataString("")) } // Most event messages are basic types (a block, a transaction) @@ -119,6 +118,9 @@ type EventDataNewBlock struct { ResultEndBlock abci.ResponseEndBlock `json:"result_end_block"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataNewBlock) TypeTag() string { return "tendermint/event/NewBlock" } + type EventDataNewBlockHeader struct { Header Header `json:"header"` @@ -127,17 +129,26 @@ type EventDataNewBlockHeader struct { ResultEndBlock abci.ResponseEndBlock `json:"result_end_block"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataNewBlockHeader) TypeTag() string { return "tendermint/event/NewBlockHeader" } + type EventDataNewEvidence struct { Evidence Evidence `json:"evidence"` Height int64 `json:"height"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataNewEvidence) TypeTag() string { return "tendermint/event/NewEvidence" } + // All txs fire EventDataTx type EventDataTx struct { abci.TxResult } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataTx) TypeTag() string { return "tendermint/event/Tx" } + // NOTE: This goes into the replay WAL type EventDataRoundState struct { Height int64 `json:"height"` @@ -145,6 +156,9 @@ type EventDataRoundState struct { Step string `json:"step"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataRoundState) TypeTag() string { return "tendermint/event/RoundState" } + type ValidatorInfo struct { Address Address `json:"address"` Index int32 `json:"index"` @@ -158,6 +172,9 @@ type EventDataNewRound struct { Proposer ValidatorInfo `json:"proposer"` } +// TypeTag implements the required method of jsontypes.Tagged. 
+func (EventDataNewRound) TypeTag() string { return "tendermint/event/NewRound" } + type EventDataCompleteProposal struct { Height int64 `json:"height"` Round int32 `json:"round"` @@ -166,16 +183,28 @@ type EventDataCompleteProposal struct { BlockID BlockID `json:"block_id"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataCompleteProposal) TypeTag() string { return "tendermint/event/CompleteProposal" } + type EventDataVote struct { Vote *Vote } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataVote) TypeTag() string { return "tendermint/event/Vote" } + type EventDataString string +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataString) TypeTag() string { return "tendermint/event/ProposalString" } + type EventDataValidatorSetUpdates struct { ValidatorUpdates []*Validator `json:"validator_updates"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataValidatorSetUpdates) TypeTag() string { return "tendermint/event/ValidatorSetUpdates" } + // EventDataBlockSyncStatus shows the fastsync status and the // height when the node state sync mechanism changes. type EventDataBlockSyncStatus struct { @@ -183,6 +212,9 @@ type EventDataBlockSyncStatus struct { Height int64 `json:"height"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataBlockSyncStatus) TypeTag() string { return "tendermint/event/FastSyncStatus" } + // EventDataStateSyncStatus shows the statesync status and the // height when the node state sync mechanism changes. type EventDataStateSyncStatus struct { @@ -190,6 +222,9 @@ type EventDataStateSyncStatus struct { Height int64 `json:"height"` } +// TypeTag implements the required method of jsontypes.Tagged. 
+func (EventDataStateSyncStatus) TypeTag() string { return "tendermint/event/StateSyncStatus" } + // PUBSUB const ( @@ -223,7 +258,6 @@ var ( EventQueryTimeoutPropose = QueryForEvent(EventTimeoutProposeValue) EventQueryTimeoutWait = QueryForEvent(EventTimeoutWaitValue) EventQueryTx = QueryForEvent(EventTxValue) - EventQueryUnlock = QueryForEvent(EventUnlockValue) EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdatesValue) EventQueryValidBlock = QueryForEvent(EventValidBlockValue) EventQueryVote = QueryForEvent(EventVoteValue) @@ -231,23 +265,23 @@ var ( EventQueryStateSyncStatus = QueryForEvent(EventStateSyncStatusValue) ) -func EventQueryTxFor(tx Tx) tmpubsub.Query { - return tmquery.MustParse(fmt.Sprintf("%s='%s' AND %s='%X'", EventTypeKey, EventTxValue, TxHashKey, tx.Hash())) +func EventQueryTxFor(tx Tx) *tmquery.Query { + return tmquery.MustCompile(fmt.Sprintf("%s='%s' AND %s='%X'", EventTypeKey, EventTxValue, TxHashKey, tx.Hash())) } -func QueryForEvent(eventValue string) tmpubsub.Query { - return tmquery.MustParse(fmt.Sprintf("%s='%s'", EventTypeKey, eventValue)) +func QueryForEvent(eventValue string) *tmquery.Query { + return tmquery.MustCompile(fmt.Sprintf("%s='%s'", EventTypeKey, eventValue)) } // BlockEventPublisher publishes all block related events type BlockEventPublisher interface { - PublishEventNewBlock(block EventDataNewBlock) error - PublishEventNewBlockHeader(header EventDataNewBlockHeader) error - PublishEventNewEvidence(evidence EventDataNewEvidence) error - PublishEventTx(EventDataTx) error - PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error + PublishEventNewBlock(ctx context.Context, block EventDataNewBlock) error + PublishEventNewBlockHeader(ctx context.Context, header EventDataNewBlockHeader) error + PublishEventNewEvidence(ctx context.Context, evidence EventDataNewEvidence) error + PublishEventTx(context.Context, EventDataTx) error + PublishEventValidatorSetUpdates(context.Context, 
EventDataValidatorSetUpdates) error } type TxEventPublisher interface { - PublishEventTx(EventDataTx) error + PublishEventTx(context.Context, EventDataTx) error } diff --git a/types/events_test.go b/types/events_test.go index dcd998acec..bd4bde264a 100644 --- a/types/events_test.go +++ b/types/events_test.go @@ -10,18 +10,18 @@ import ( func TestQueryTxFor(t *testing.T) { tx := Tx("foo") assert.Equal(t, - fmt.Sprintf("tm.event='Tx' AND tx.hash='%X'", tx.Hash()), + fmt.Sprintf("tm.event = 'Tx' AND tx.hash = '%X'", tx.Hash()), EventQueryTxFor(tx).String(), ) } func TestQueryForEvent(t *testing.T) { assert.Equal(t, - "tm.event='NewBlock'", + "tm.event = 'NewBlock'", QueryForEvent(EventNewBlockValue).String(), ) assert.Equal(t, - "tm.event='NewEvidence'", + "tm.event = 'NewEvidence'", QueryForEvent(EventNewEvidenceValue).String(), ) } diff --git a/types/evidence.go b/types/evidence.go index 330850ea32..c9a60786ac 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/binary" + "encoding/json" "errors" "fmt" "sort" @@ -13,7 +14,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -28,6 +29,9 @@ type Evidence interface { String() string // string format of the evidence Time() time.Time // time of the infraction ValidateBasic() error // basic consistency check + + // Implementations must support tagged encoding in JSON. 
+ jsontypes.Tagged } //-------------------------------------------------------------------------------------- @@ -38,11 +42,14 @@ type DuplicateVoteEvidence struct { VoteB *Vote `json:"vote_b"` // abci specific information - TotalVotingPower int64 - ValidatorPower int64 + TotalVotingPower int64 `json:",string"` + ValidatorPower int64 `json:",string"` Timestamp time.Time } +// TypeTag implements the jsontypes.Tagged interface. +func (*DuplicateVoteEvidence) TypeTag() string { return "tendermint/DuplicateVoteEvidence" } + var _ Evidence = &DuplicateVoteEvidence{} // NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given @@ -236,14 +243,17 @@ func DuplicateVoteEvidenceFromProto(pb *tmproto.DuplicateVoteEvidence) (*Duplica // height, then nodes will treat this as of the Lunatic form, else it is of the Equivocation form. type LightClientAttackEvidence struct { ConflictingBlock *LightBlock - CommonHeight int64 + CommonHeight int64 `json:",string"` // abci specific information ByzantineValidators []*Validator // validators in the validator set that misbehaved in creating the conflicting block - TotalVotingPower int64 // total voting power of the validator set at the common height + TotalVotingPower int64 `json:",string"` // total voting power of the validator set at the common height Timestamp time.Time // timestamp of the block at the common height } +// TypeTag implements the jsontypes.Tagged interface. 
+func (*LightClientAttackEvidence) TypeTag() string { return "tendermint/LightClientAttackEvidence" } + var _ Evidence = &LightClientAttackEvidence{} // ABCI forms an array of abci evidence for each byzantine validator @@ -365,10 +375,10 @@ func (l *LightClientAttackEvidence) Height() int64 { // String returns a string representation of LightClientAttackEvidence func (l *LightClientAttackEvidence) String() string { return fmt.Sprintf(`LightClientAttackEvidence{ - ConflictingBlock: %v, - CommonHeight: %d, - ByzatineValidators: %v, - TotalVotingPower: %d, + ConflictingBlock: %v, + CommonHeight: %d, + ByzatineValidators: %v, + TotalVotingPower: %d, Timestamp: %v}#%X`, l.ConflictingBlock.String(), l.CommonHeight, l.ByzantineValidators, l.TotalVotingPower, l.Timestamp, l.Hash()) @@ -544,6 +554,33 @@ func LightClientAttackEvidenceFromProto(lpb *tmproto.LightClientAttackEvidence) // EvidenceList is a list of Evidence. Evidences is not a word. type EvidenceList []Evidence +func (evl EvidenceList) MarshalJSON() ([]byte, error) { + lst := make([]json.RawMessage, len(evl)) + for i, ev := range evl { + bits, err := jsontypes.Marshal(ev) + if err != nil { + return nil, err + } + lst[i] = bits + } + return json.Marshal(lst) +} + +func (evl *EvidenceList) UnmarshalJSON(data []byte) error { + var lst []json.RawMessage + if err := json.Unmarshal(data, &lst); err != nil { + return err + } + out := make([]Evidence, len(lst)) + for i, elt := range lst { + if err := jsontypes.Unmarshal(elt, &out[i]); err != nil { + return err + } + } + *evl = EvidenceList(out) + return nil +} + // Hash returns the simple merkle root hash of the EvidenceList. 
func (evl EvidenceList) Hash() []byte { // These allocations are required because Evidence is not of type Bytes, and @@ -628,8 +665,8 @@ func EvidenceFromProto(evidence *tmproto.Evidence) (Evidence, error) { } func init() { - tmjson.RegisterType(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence") - tmjson.RegisterType(&LightClientAttackEvidence{}, "tendermint/LightClientAttackEvidence") + jsontypes.MustRegister((*DuplicateVoteEvidence)(nil)) + jsontypes.MustRegister((*LightClientAttackEvidence)(nil)) } //-------------------------------------------- ERRORS -------------------------------------- @@ -671,29 +708,32 @@ func (err *ErrEvidenceOverflow) Error() string { // unstable - use only for testing // assumes the round to be 0 and the validator index to be 0 -func NewMockDuplicateVoteEvidence(height int64, time time.Time, chainID string) *DuplicateVoteEvidence { +func NewMockDuplicateVoteEvidence(ctx context.Context, height int64, time time.Time, chainID string) (*DuplicateVoteEvidence, error) { val := NewMockPV() - return NewMockDuplicateVoteEvidenceWithValidator(height, time, val, chainID) + return NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time, val, chainID) } // assumes voting power to be 10 and validator to be the only one in the set -func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, - pv PrivValidator, chainID string) *DuplicateVoteEvidence { - pubKey, _ := pv.GetPubKey(context.Background()) +func NewMockDuplicateVoteEvidenceWithValidator(ctx context.Context, height int64, time time.Time, pv PrivValidator, chainID string) (*DuplicateVoteEvidence, error) { + pubKey, err := pv.GetPubKey(ctx) + if err != nil { + return nil, err + } + val := NewValidator(pubKey, 10) voteA := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) vA := voteA.ToProto() - _ = pv.SignVote(context.Background(), chainID, vA) + _ = pv.SignVote(ctx, chainID, vA) voteA.Signature = vA.Signature voteB := makeMockVote(height, 0, 0, 
pubKey.Address(), randBlockID(), time) vB := voteB.ToProto() - _ = pv.SignVote(context.Background(), chainID, vB) + _ = pv.SignVote(ctx, chainID, vB) voteB.Signature = vB.Signature ev, err := NewDuplicateVoteEvidence(voteA, voteB, time, NewValidatorSet([]*Validator{val})) if err != nil { - panic("constructing mock duplicate vote evidence: " + err.Error()) + return nil, fmt.Errorf("constructing mock duplicate vote evidence: %w", err) } - return ev + return ev, nil } func makeMockVote(height int64, round, index int32, addr Address, diff --git a/types/evidence_test.go b/types/evidence_test.go index 5110bcb1de..440531aa97 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -22,7 +22,10 @@ import ( var defaultVoteTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) func TestEvidenceList(t *testing.T) { - ev := randomDuplicateVoteEvidence(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ev := randomDuplicateVoteEvidence(ctx, t) evl := EvidenceList([]Evidence{ev}) assert.NotNil(t, evl.Hash()) @@ -30,14 +33,15 @@ func TestEvidenceList(t *testing.T) { assert.False(t, evl.Has(&DuplicateVoteEvidence{})) } -func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { +func randomDuplicateVoteEvidence(ctx context.Context, t *testing.T) *DuplicateVoteEvidence { + t.Helper() val := NewMockPV() blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" return &DuplicateVoteEvidence{ - VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultVoteTime), - VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, defaultVoteTime.Add(1*time.Minute)), + VoteA: makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, defaultVoteTime), + VoteB: makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID2, defaultVoteTime.Add(1*time.Minute)), TotalVotingPower: 30, ValidatorPower: 10, Timestamp: defaultVoteTime, @@ -46,7 +50,11 @@ 
func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { func TestDuplicateVoteEvidence(t *testing.T) { const height = int64(13) - ev := NewMockDuplicateVoteEvidence(height, time.Now(), "mock-chain-id") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ev, err := NewMockDuplicateVoteEvidence(ctx, height, time.Now(), "mock-chain-id") + require.NoError(t, err) assert.Equal(t, ev.Hash(), tmhash.Sum(ev.Bytes())) assert.NotNil(t, ev.String()) assert.Equal(t, ev.Height(), height) @@ -58,6 +66,9 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) const chainID = "mychain" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testCases := []struct { testName string malleateEvidence func(*DuplicateVoteEvidence) @@ -71,7 +82,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { ev.VoteB = nil }, true}, {"Invalid vote type", func(ev *DuplicateVoteEvidence) { - ev.VoteA = makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0, blockID2, defaultVoteTime) + ev.VoteA = makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0, blockID2, defaultVoteTime) }, true}, {"Invalid vote order", func(ev *DuplicateVoteEvidence) { swap := ev.VoteA.Copy() @@ -82,9 +93,9 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.testName, func(t *testing.T) { - vote1 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID, defaultVoteTime) - vote2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID2, defaultVoteTime) - valSet := NewValidatorSet([]*Validator{val.ExtractIntoValidator(10)}) + vote1 := makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID, defaultVoteTime) + vote2 := makeVote(ctx, t, val, chainID, 
math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID2, defaultVoteTime) + valSet := NewValidatorSet([]*Validator{val.ExtractIntoValidator(ctx, 10)}) ev, err := NewDuplicateVoteEvidence(vote1, vote2, defaultVoteTime, valSet) require.NoError(t, err) tc.malleateEvidence(ev) @@ -94,14 +105,18 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { } func TestLightClientAttackEvidenceBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height := int64(5) commonHeight := height - 1 nValidators := 10 - voteSet, valSet, privVals := randVoteSet(height, 1, tmproto.PrecommitType, nValidators, 1) + voteSet, valSet, privVals := randVoteSet(ctx, t, height, 1, tmproto.PrecommitType, nValidators, 1) + header := makeHeaderRandom() header.Height = height blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - commit, err := makeCommit(blockID, height, 1, voteSet, privVals, defaultVoteTime) + commit, err := makeCommit(ctx, blockID, height, 1, voteSet, privVals, defaultVoteTime) require.NoError(t, err) lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ @@ -153,15 +168,19 @@ func TestLightClientAttackEvidenceBasic(t *testing.T) { } func TestLightClientAttackEvidenceValidation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height := int64(5) commonHeight := height - 1 nValidators := 10 - voteSet, valSet, privVals := randVoteSet(height, 1, tmproto.PrecommitType, nValidators, 1) + voteSet, valSet, privVals := randVoteSet(ctx, t, height, 1, tmproto.PrecommitType, nValidators, 1) + header := makeHeaderRandom() header.Height = height header.ValidatorsHash = valSet.Hash() blockID := makeBlockID(header.Hash(), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - commit, err := makeCommit(blockID, height, 1, voteSet, privVals, time.Now()) + commit, err := makeCommit(ctx, blockID, height, 1, voteSet, privVals, time.Now()) 
require.NoError(t, err) lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ @@ -228,14 +247,20 @@ func TestLightClientAttackEvidenceValidation(t *testing.T) { } func TestMockEvidenceValidateBasic(t *testing.T) { - goodEvidence := NewMockDuplicateVoteEvidence(int64(1), time.Now(), "mock-chain-id") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + goodEvidence, err := NewMockDuplicateVoteEvidence(ctx, int64(1), time.Now(), "mock-chain-id") + require.NoError(t, err) assert.Nil(t, goodEvidence.ValidateBasic()) } func makeVote( + ctx context.Context, t *testing.T, val PrivValidator, chainID string, valIndex int32, height int64, round int32, step int, blockID BlockID, - time time.Time) *Vote { - pubKey, err := val.GetPubKey(context.Background()) + time time.Time, +) *Vote { + pubKey, err := val.GetPubKey(ctx) require.NoError(t, err) v := &Vote{ ValidatorAddress: pubKey.Address(), @@ -248,10 +273,9 @@ func makeVote( } vpb := v.ToProto() - err = val.SignVote(context.Background(), chainID, vpb) - if err != nil { - panic(err) - } + err = val.SignVote(ctx, chainID, vpb) + require.NoError(t, err) + v.Signature = vpb.Signature return v } @@ -276,13 +300,16 @@ func makeHeaderRandom() *Header { } func TestEvidenceProto(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // -------- Votes -------- val := NewMockPV() blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) const chainID = "mychain" - v := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, blockID, defaultVoteTime) - v2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, blockID2, defaultVoteTime) + v := makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, blockID, defaultVoteTime) + v2 := makeVote(ctx, t, val, chainID, math.MaxInt32, 
math.MaxInt64, 2, 0x01, blockID2, defaultVoteTime) tests := []struct { testName string @@ -317,20 +344,23 @@ func TestEvidenceProto(t *testing.T) { } func TestEvidenceVectors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Votes for duplicateEvidence val := NewMockPV() val.PrivKey = ed25519.GenPrivKeyFromSecret([]byte("it's a secret")) // deterministic key blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) const chainID = "mychain" - v := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, blockID, defaultVoteTime) - v2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, blockID2, defaultVoteTime) + v := makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, blockID, defaultVoteTime) + v2 := makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, blockID2, defaultVoteTime) // Data for LightClientAttackEvidence height := int64(5) commonHeight := height - 1 nValidators := 10 - voteSet, valSet, privVals := deterministicVoteSet(height, 1, tmproto.PrecommitType, 1) + voteSet, valSet, privVals := deterministicVoteSet(ctx, t, height, 1, tmproto.PrecommitType, 1) header := &Header{ Version: version.Consensus{Block: 1, App: 1}, ChainID: chainID, @@ -350,7 +380,7 @@ func TestEvidenceVectors(t *testing.T) { ProposerAddress: []byte("2915b7b15f979e48ebc61774bb1d86ba3136b7eb"), } blockID3 := makeBlockID(header.Hash(), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - commit, err := makeCommit(blockID3, height, 1, voteSet, privVals, defaultVoteTime) + commit, err := makeCommit(ctx, blockID3, height, 1, voteSet, privVals, defaultVoteTime) require.NoError(t, err) lcae := &LightClientAttackEvidence{ ConflictingBlock: &LightBlock{ diff --git a/types/genesis.go b/types/genesis.go index 47580d5f7d..f711a70902 100644 --- 
a/types/genesis.go +++ b/types/genesis.go @@ -5,12 +5,12 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/jsontypes" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson "github.com/tendermint/tendermint/libs/json" tmtime "github.com/tendermint/tendermint/libs/time" ) @@ -27,17 +27,48 @@ const ( // GenesisValidator is an initial validator. type GenesisValidator struct { - Address Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - Power int64 `json:"power"` - Name string `json:"name"` + Address Address + PubKey crypto.PubKey + Power int64 + Name string +} + +type genesisValidatorJSON struct { + Address Address `json:"address"` + PubKey json.RawMessage `json:"pub_key"` + Power int64 `json:"power,string"` + Name string `json:"name"` +} + +func (g GenesisValidator) MarshalJSON() ([]byte, error) { + pk, err := jsontypes.Marshal(g.PubKey) + if err != nil { + return nil, err + } + return json.Marshal(genesisValidatorJSON{ + Address: g.Address, PubKey: pk, Power: g.Power, Name: g.Name, + }) +} + +func (g *GenesisValidator) UnmarshalJSON(data []byte) error { + var gv genesisValidatorJSON + if err := json.Unmarshal(data, &gv); err != nil { + return err + } + if err := jsontypes.Unmarshal(gv.PubKey, &g.PubKey); err != nil { + return err + } + g.Address = gv.Address + g.Power = gv.Power + g.Name = gv.Name + return nil } // GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set. 
type GenesisDoc struct { GenesisTime time.Time `json:"genesis_time"` ChainID string `json:"chain_id"` - InitialHeight int64 `json:"initial_height"` + InitialHeight int64 `json:"initial_height,string"` ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators,omitempty"` AppHash tmbytes.HexBytes `json:"app_hash"` @@ -46,12 +77,12 @@ type GenesisDoc struct { // SaveAs is a utility method for saving GenensisDoc as a JSON file. func (genDoc *GenesisDoc) SaveAs(file string) error { - genDocBytes, err := tmjson.MarshalIndent(genDoc, "", " ") + genDocBytes, err := json.MarshalIndent(genDoc, "", " ") if err != nil { return err } - return ioutil.WriteFile(file, genDocBytes, 0644) // nolint:gosec + return os.WriteFile(file, genDocBytes, 0644) // nolint:gosec } // ValidatorHash returns the hash of the validator set contained in the GenesisDoc @@ -111,7 +142,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { // GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc. func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { genDoc := GenesisDoc{} - err := tmjson.Unmarshal(jsonBlob, &genDoc) + err := json.Unmarshal(jsonBlob, &genDoc) if err != nil { return nil, err } @@ -125,7 +156,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { // GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc. 
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { - jsonBlob, err := ioutil.ReadFile(genDocFile) + jsonBlob, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } diff --git a/types/genesis_test.go b/types/genesis_test.go index 7fb3088ddf..1045b7065f 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "encoding/json" "os" "testing" @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" - tmjson "github.com/tendermint/tendermint/libs/json" tmtime "github.com/tendermint/tendermint/libs/time" ) @@ -57,21 +56,26 @@ func TestGenesisBad(t *testing.T) { } } -func TestGenesisGood(t *testing.T) { +func TestBasicGenesisDoc(t *testing.T) { // test a good one by raw json genDocBytes := []byte( `{ "genesis_time": "0001-01-01T00:00:00Z", "chain_id": "test-chain-QDKdJr", "initial_height": "1000", - "consensus_params": null, "validators": [{ "pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}, "power":"10", "name":"" }], "app_hash":"", - "app_state":{"account_owner": "Bob"} + "app_state":{"account_owner": "Bob"}, + "consensus_params": { + "synchrony": {"precision": "1", "message_delay": "10"}, + "validator": {"pub_key_types":["ed25519"]}, + "block": {"max_bytes": "100"}, + "evidence": {"max_age_num_blocks": "100", "max_age_duration": "10"} + } }`, ) _, err := GenesisDocFromJSON(genDocBytes) @@ -83,7 +87,7 @@ func TestGenesisGood(t *testing.T) { ChainID: "abc", Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, } - genDocBytes, err = tmjson.Marshal(baseGenDoc) + genDocBytes, err = json.Marshal(baseGenDoc) assert.NoError(t, err, "error marshaling genDoc") // test base gendoc and check consensus params were filled @@ -95,14 +99,14 @@ func TestGenesisGood(t *testing.T) { assert.NotNil(t, genDoc.Validators[0].Address, 
"expected validator's address to be filled in") // create json with consensus params filled - genDocBytes, err = tmjson.Marshal(genDoc) + genDocBytes, err = json.Marshal(genDoc) assert.NoError(t, err, "error marshaling genDoc") genDoc, err = GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for valid genDoc json") + require.NoError(t, err, "expected no error for valid genDoc json") // test with invalid consensus params genDoc.ConsensusParams.Block.MaxBytes = 0 - genDocBytes, err = tmjson.Marshal(genDoc) + genDocBytes, err = json.Marshal(genDoc) assert.NoError(t, err, "error marshaling genDoc") _, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") @@ -122,7 +126,7 @@ func TestGenesisGood(t *testing.T) { } func TestGenesisSaveAs(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "genesis") + tmpfile, err := os.CreateTemp("", "genesis") require.NoError(t, err) defer os.Remove(tmpfile.Name()) diff --git a/types/light.go b/types/light.go index 5a650a1596..3b1ddfcbe7 100644 --- a/types/light.go +++ b/types/light.go @@ -4,10 +4,26 @@ import ( "bytes" "errors" "fmt" + "time" + tbytes "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +// Info about the status of the light client +type LightClientInfo struct { + PrimaryID string `json:"primaryID"` + WitnessesID []string `json:"witnessesID"` + NumPeers int `json:"number_of_peers,string"` + LastTrustedHeight int64 `json:"last_trusted_height,string"` + LastTrustedHash tbytes.HexBytes `json:"last_trusted_hash"` + LatestBlockTime time.Time `json:"latest_block_time"` + TrustingPeriod string `json:"trusting_period"` + // Boolean that reflects whether LatestBlockTime + trusting period is before + // time.Now() (time when /status is called) + TrustedBlockExpired bool `json:"trusted_block_expired"` +} + // LightBlock is a SignedHeader and a ValidatorSet. 
// It is the basis of the light client type LightBlock struct { diff --git a/types/light_test.go b/types/light_test.go index 94b2c4b4fa..eb35474e27 100644 --- a/types/light_test.go +++ b/types/light_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "math" "testing" "time" @@ -12,14 +13,19 @@ import ( ) func TestLightBlockValidateBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + header := MakeRandHeader() - commit := randCommit(time.Now()) - vals, _ := randValidatorPrivValSet(5, 1) + commit := randCommit(ctx, t, time.Now()) + vals, _ := randValidatorPrivValSet(ctx, t, 5, 1) + header.Height = commit.Height header.LastBlockID = commit.BlockID header.ValidatorsHash = vals.Hash() header.Version.Block = version.BlockProtocol - vals2, _ := randValidatorPrivValSet(3, 1) + vals2, _ := randValidatorPrivValSet(ctx, t, 3, 1) + vals3 := vals.Copy() vals3.Proposer = &Validator{} commit.BlockID.Hash = header.Hash() @@ -38,7 +44,7 @@ func TestLightBlockValidateBasic(t *testing.T) { {"valid light block", sh, vals, false}, {"hashes don't match", sh, vals2, true}, {"invalid validator set", sh, vals3, true}, - {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(time.Now())}, vals, true}, + {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(ctx, t, time.Now())}, vals, true}, } for _, tc := range testCases { @@ -57,9 +63,12 @@ func TestLightBlockValidateBasic(t *testing.T) { } func TestLightBlockProtobuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() header := MakeRandHeader() - commit := randCommit(time.Now()) - vals, _ := randValidatorPrivValSet(5, 1) + commit := randCommit(ctx, t, time.Now()) + vals, _ := randValidatorPrivValSet(ctx, t, 5, 1) + header.Height = commit.Height header.LastBlockID = commit.BlockID header.Version.Block = version.BlockProtocol @@ -110,7 +119,11 @@ func TestLightBlockProtobuf(t *testing.T) { } func 
TestSignedHeaderValidateBasic(t *testing.T) { - commit := randCommit(time.Now()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + commit := randCommit(ctx, t, time.Now()) + chainID := "𠜎" timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ diff --git a/types/node_info.go b/types/node_info.go index 902ca759bf..57aced0544 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -24,9 +24,9 @@ func MaxNodeInfoSize() int { // ProtocolVersion contains the protocol versions for the software. type ProtocolVersion struct { - P2P uint64 `json:"p2p"` - Block uint64 `json:"block"` - App uint64 `json:"app"` + P2P uint64 `json:"p2p,string"` + Block uint64 `json:"block,string"` + App uint64 `json:"app,string"` } //------------------------------------------------------------- diff --git a/types/node_info_test.go b/types/node_info_test.go index ff30aa30a8..c14570c96b 100644 --- a/types/node_info_test.go +++ b/types/node_info_test.go @@ -75,12 +75,12 @@ func TestNodeInfoValidate(t *testing.T) { name := "testing" // test case passes - ni = testNodeInfo(nodeKeyID, name) + ni = testNodeInfo(t, nodeKeyID, name) ni.Channels = channels assert.NoError(t, ni.Validate()) for _, tc := range testCases { - ni := testNodeInfo(nodeKeyID, name) + ni := testNodeInfo(t, nodeKeyID, name) ni.Channels = channels tc.malleateNodeInfo(&ni) err := ni.Validate() @@ -97,11 +97,12 @@ func testNodeID() NodeID { return NodeIDFromPubKey(ed25519.GenPrivKey().PubKey()) } -func testNodeInfo(id NodeID, name string) NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") +func testNodeInfo(t *testing.T, id NodeID, name string) NodeInfo { + return testNodeInfoWithNetwork(t, id, name, "testing") } -func testNodeInfoWithNetwork(id NodeID, name, network string) NodeInfo { +func testNodeInfoWithNetwork(t *testing.T, id NodeID, name, network string) NodeInfo { + t.Helper() return NodeInfo{ ProtocolVersion: ProtocolVersion{ P2P: 
version.P2PProtocol, @@ -109,23 +110,22 @@ func testNodeInfoWithNetwork(id NodeID, name, network string) NodeInfo { App: 0, }, NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort(t)), Network: network, Version: "1.2.3-rc0-deadbeef", Channels: []byte{testCh}, Moniker: name, Other: NodeInfoOther{ TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort(t)), }, } } -func getFreePort() int { +func getFreePort(t *testing.T) int { + t.Helper() port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } + require.NoError(t, err) return port } @@ -137,8 +137,8 @@ func TestNodeInfoCompatible(t *testing.T) { var newTestChannel byte = 0x2 // test NodeInfo is compatible - ni1 := testNodeInfo(nodeKey1ID, name) - ni2 := testNodeInfo(nodeKey2ID, name) + ni1 := testNodeInfo(t, nodeKey1ID, name) + ni2 := testNodeInfo(t, nodeKey2ID, name) assert.NoError(t, ni1.CompatibleWith(ni2)) // add another channel; still compatible @@ -155,14 +155,14 @@ func TestNodeInfoCompatible(t *testing.T) { } for _, tc := range testCases { - ni := testNodeInfo(nodeKey2ID, name) + ni := testNodeInfo(t, nodeKey2ID, name) tc.malleateNodeInfo(&ni) assert.Error(t, ni1.CompatibleWith(ni)) } } func TestNodeInfoAddChannel(t *testing.T) { - nodeInfo := testNodeInfo(testNodeID(), "testing") + nodeInfo := testNodeInfo(t, testNodeID(), "testing") nodeInfo.Channels = []byte{} require.Empty(t, nodeInfo.Channels) @@ -241,7 +241,7 @@ func TestParseAddressString(t *testing.T) { t.Run(tc.name, func(t *testing.T) { addr, port, err := ParseAddressString(tc.addr) if tc.correct { - require.Nil(t, err, tc.addr) + require.NoError(t, err, tc.addr) assert.Contains(t, tc.expected, addr.String()) assert.Contains(t, tc.expected, fmt.Sprint(port)) } else { diff --git a/types/node_key.go b/types/node_key.go index 547fa1696e..927e17065d 100644 --- a/types/node_key.go +++ 
b/types/node_key.go @@ -1,11 +1,12 @@ package types import ( - "io/ioutil" + "encoding/json" + "os" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" tmos "github.com/tendermint/tendermint/libs/os" ) @@ -17,23 +18,51 @@ import ( // It contains the nodes private key for authentication. type NodeKey struct { // Canonical ID - hex-encoded pubkey's address (IDByteLength bytes) - ID NodeID `json:"id"` + ID NodeID // Private key - PrivKey crypto.PrivKey `json:"priv_key"` + PrivKey crypto.PrivKey +} + +type nodeKeyJSON struct { + ID NodeID `json:"id"` + PrivKey json.RawMessage `json:"priv_key"` +} + +func (nk NodeKey) MarshalJSON() ([]byte, error) { + pk, err := jsontypes.Marshal(nk.PrivKey) + if err != nil { + return nil, err + } + return json.Marshal(nodeKeyJSON{ + ID: nk.ID, PrivKey: pk, + }) +} + +func (nk *NodeKey) UnmarshalJSON(data []byte) error { + var nkjson nodeKeyJSON + if err := json.Unmarshal(data, &nkjson); err != nil { + return err + } + var pk crypto.PrivKey + if err := jsontypes.Unmarshal(nkjson.PrivKey, &pk); err != nil { + return err + } + *nk = NodeKey{ID: nkjson.ID, PrivKey: pk} + return nil } // PubKey returns the peer's PubKey -func (nodeKey NodeKey) PubKey() crypto.PubKey { - return nodeKey.PrivKey.PubKey() +func (nk NodeKey) PubKey() crypto.PubKey { + return nk.PrivKey.PubKey() } // SaveAs persists the NodeKey to filePath. -func (nodeKey NodeKey) SaveAs(filePath string) error { - jsonBytes, err := tmjson.Marshal(nodeKey) +func (nk NodeKey) SaveAs(filePath string) error { + jsonBytes, err := json.Marshal(nk) if err != nil { return err } - return ioutil.WriteFile(filePath, jsonBytes, 0600) + return os.WriteFile(filePath, jsonBytes, 0600) } // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. 
If @@ -67,12 +96,12 @@ func GenNodeKey() NodeKey { // LoadNodeKey loads NodeKey located in filePath. func LoadNodeKey(filePath string) (NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) + jsonBytes, err := os.ReadFile(filePath) if err != nil { return NodeKey{}, err } nodeKey := NodeKey{} - err = tmjson.Unmarshal(jsonBytes, &nodeKey) + err = json.Unmarshal(jsonBytes, &nodeKey) if err != nil { return NodeKey{}, err } diff --git a/types/node_key_test.go b/types/node_key_test.go index 7c0b945c38..0dea771eaf 100644 --- a/types/node_key_test.go +++ b/types/node_key_test.go @@ -7,23 +7,22 @@ import ( "github.com/stretchr/testify/require" - tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) func TestLoadOrGenNodeKey(t *testing.T) { - filePath := filepath.Join(os.TempDir(), tmrand.Str(12)+"_peer_id.json") + filePath := filepath.Join(t.TempDir(), "peer_id.json") nodeKey, err := types.LoadOrGenNodeKey(filePath) - require.Nil(t, err) + require.NoError(t, err) nodeKey2, err := types.LoadOrGenNodeKey(filePath) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, nodeKey, nodeKey2) } func TestLoadNodeKey(t *testing.T) { - filePath := filepath.Join(os.TempDir(), tmrand.Str(12)+"_peer_id.json") + filePath := filepath.Join(t.TempDir(), "peer_id.json") _, err := types.LoadNodeKey(filePath) require.True(t, os.IsNotExist(err)) @@ -37,7 +36,7 @@ func TestLoadNodeKey(t *testing.T) { } func TestNodeKeySaveAs(t *testing.T) { - filePath := filepath.Join(os.TempDir(), tmrand.Str(12)+"_peer_id.json") + filePath := filepath.Join(t.TempDir(), "peer_id.json") require.NoFileExists(t, filePath) nodeKey := types.GenNodeKey() diff --git a/types/params.go b/types/params.go index 32d0f71c80..e3ddb16346 100644 --- a/types/params.go +++ b/types/params.go @@ -41,6 +41,7 @@ type ConsensusParams struct { Evidence EvidenceParams `json:"evidence"` Validator ValidatorParams `json:"validator"` Version VersionParams `json:"version"` + 
Synchrony SynchronyParams `json:"synchrony"` } // HashedParams is a subset of ConsensusParams. @@ -54,15 +55,15 @@ type HashedParams struct { // BlockParams define limits on the block size and gas plus minimum time // between blocks. type BlockParams struct { - MaxBytes int64 `json:"max_bytes"` - MaxGas int64 `json:"max_gas"` + MaxBytes int64 `json:"max_bytes,string"` + MaxGas int64 `json:"max_gas,string"` } // EvidenceParams determine how we handle evidence of malfeasance. type EvidenceParams struct { - MaxAgeNumBlocks int64 `json:"max_age_num_blocks"` // only accept new evidence more recent than this - MaxAgeDuration time.Duration `json:"max_age_duration"` - MaxBytes int64 `json:"max_bytes"` + MaxAgeNumBlocks int64 `json:"max_age_num_blocks,string"` // only accept new evidence more recent than this + MaxAgeDuration time.Duration `json:"max_age_duration,string"` + MaxBytes int64 `json:"max_bytes,string"` } // ValidatorParams restrict the public key types validators can use. @@ -72,7 +73,16 @@ type ValidatorParams struct { } type VersionParams struct { - AppVersion uint64 `json:"app_version"` + AppVersion uint64 `json:"app_version,string"` +} + +// SynchronyParams influence the validity of block timestamps. +// For more information on the relationship of the synchrony parameters to +// block validity, see the Proposer-Based Timestamps specification: +// https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/README.md +type SynchronyParams struct { + Precision time.Duration `json:"precision,string"` + MessageDelay time.Duration `json:"message_delay,string"` } // DefaultConsensusParams returns a default ConsensusParams. 
@@ -82,6 +92,7 @@ func DefaultConsensusParams() *ConsensusParams { Evidence: DefaultEvidenceParams(), Validator: DefaultValidatorParams(), Version: DefaultVersionParams(), + Synchrony: DefaultSynchronyParams(), } } @@ -116,6 +127,15 @@ func DefaultVersionParams() VersionParams { } } +func DefaultSynchronyParams() SynchronyParams { + // TODO(@wbanfield): Determine experimental values for these defaults + // https://github.com/tendermint/tendermint/issues/7202 + return SynchronyParams{ + Precision: 500 * time.Millisecond, + MessageDelay: 3 * time.Second, + } +} + func (val *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { for i := 0; i < len(val.PubKeyTypes); i++ { if val.PubKeyTypes[i] == pubkeyType { @@ -148,7 +168,7 @@ func (params ConsensusParams) ValidateConsensusParams() error { } if params.Evidence.MaxAgeDuration <= 0 { - return fmt.Errorf("evidence.MaxAgeDuration must be grater than 0 if provided, Got %v", + return fmt.Errorf("evidence.MaxAgeDuration must be greater than 0 if provided, Got %v", params.Evidence.MaxAgeDuration) } @@ -162,6 +182,16 @@ func (params ConsensusParams) ValidateConsensusParams() error { params.Evidence.MaxBytes) } + if params.Synchrony.MessageDelay <= 0 { + return fmt.Errorf("synchrony.MessageDelay must be greater than 0. Got: %d", + params.Synchrony.MessageDelay) + } + + if params.Synchrony.Precision <= 0 { + return fmt.Errorf("synchrony.Precision must be greater than 0. 
Got: %d", + params.Synchrony.Precision) + } + if len(params.Validator.PubKeyTypes) == 0 { return errors.New("len(Validator.PubKeyTypes) must be greater than 0") } @@ -205,6 +235,8 @@ func (params ConsensusParams) HashConsensusParams() []byte { func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { return params.Block == params2.Block && params.Evidence == params2.Evidence && + params.Version == params2.Version && + params.Synchrony == params2.Synchrony && tmstrings.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) } @@ -235,6 +267,10 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa if params2.Version != nil { res.Version.AppVersion = params2.Version.AppVersion } + if params2.Synchrony != nil { + res.Synchrony.Precision = *params2.Synchrony.Precision + res.Synchrony.MessageDelay = *params2.Synchrony.MessageDelay + } return res } @@ -255,6 +291,10 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { Version: &tmproto.VersionParams{ AppVersion: params.Version.AppVersion, }, + Synchrony: &tmproto.SynchronyParams{ + MessageDelay: ¶ms.Synchrony.MessageDelay, + Precision: ¶ms.Synchrony.Precision, + }, } } @@ -275,5 +315,9 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams Version: VersionParams{ AppVersion: pbParams.Version.AppVersion, }, + Synchrony: SynchronyParams{ + MessageDelay: *pbParams.Synchrony.MessageDelay, + Precision: *pbParams.Synchrony.Precision, + }, } } diff --git a/types/params_test.go b/types/params_test.go index 8b69a81fca..4ef8ac1c56 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -23,23 +23,140 @@ func TestConsensusParamsValidation(t *testing.T) { valid bool }{ // test block params - 0: {makeParams(1, 0, 2, 0, valEd25519), true}, - 1: {makeParams(0, 0, 2, 0, valEd25519), false}, - 2: {makeParams(47*1024*1024, 0, 2, 0, valEd25519), true}, - 3: {makeParams(10, 0, 2, 0, valEd25519), true}, - 4: 
{makeParams(100*1024*1024, 0, 2, 0, valEd25519), true}, - 5: {makeParams(101*1024*1024, 0, 2, 0, valEd25519), false}, - 6: {makeParams(1024*1024*1024, 0, 2, 0, valEd25519), false}, - 7: {makeParams(1024*1024*1024, 0, -1, 0, valEd25519), false}, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 0, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 47 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 10, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 100 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 101 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1024 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1024 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, // test evidence params - 8: {makeParams(1, 0, 0, 0, valEd25519), false}, - 9: {makeParams(1, 0, 2, 2, valEd25519), false}, - 10: {makeParams(1000, 0, 2, 1, valEd25519), true}, - 11: {makeParams(1, 0, -1, 0, valEd25519), false}, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 0, + maxEvidenceBytes: 0, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + maxEvidenceBytes: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1000, 
+ evidenceAge: 2, + maxEvidenceBytes: 1, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: -1, + maxEvidenceBytes: 0, + precision: 1, + messageDelay: 1}), + valid: false, + }, // test no pubkey type provided - 12: {makeParams(1, 0, 2, 0, []string{}), false}, + { + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + pubkeyTypes: []string{}, + precision: 1, + messageDelay: 1}), + valid: false, + }, + // test invalid pubkey type provided + { + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + pubkeyTypes: []string{"potatoes make good pubkeys"}, + precision: 1, + messageDelay: 1}), + valid: false, + }, // test invalid pubkey type provided - 13: {makeParams(1, 0, 2, 0, []string{"potatoes make good pubkeys"}), false}, + { + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + precision: 1, + messageDelay: -1}), + valid: false, + }, + { + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + precision: -1, + messageDelay: 1}), + valid: false, + }, } for i, tc := range testCases { if tc.valid { @@ -50,38 +167,51 @@ func TestConsensusParamsValidation(t *testing.T) { } } -func makeParams( - blockBytes, blockGas int64, - evidenceAge int64, - maxEvidenceBytes int64, - pubkeyTypes []string, -) ConsensusParams { +type makeParamsArgs struct { + blockBytes int64 + blockGas int64 + evidenceAge int64 + maxEvidenceBytes int64 + pubkeyTypes []string + precision time.Duration + messageDelay time.Duration +} + +func makeParams(args makeParamsArgs) ConsensusParams { + if args.pubkeyTypes == nil { + args.pubkeyTypes = valEd25519 + } return ConsensusParams{ Block: BlockParams{ - MaxBytes: blockBytes, - MaxGas: blockGas, + MaxBytes: args.blockBytes, + MaxGas: args.blockGas, }, Evidence: EvidenceParams{ - MaxAgeNumBlocks: evidenceAge, - MaxAgeDuration: time.Duration(evidenceAge), - MaxBytes: maxEvidenceBytes, + MaxAgeNumBlocks: args.evidenceAge, + MaxAgeDuration: time.Duration(args.evidenceAge), + 
MaxBytes: args.maxEvidenceBytes, }, Validator: ValidatorParams{ - PubKeyTypes: pubkeyTypes, + PubKeyTypes: args.pubkeyTypes, + }, + Synchrony: SynchronyParams{ + Precision: args.precision, + MessageDelay: args.messageDelay, }, } + } func TestConsensusParamsHash(t *testing.T) { params := []ConsensusParams{ - makeParams(4, 2, 3, 1, valEd25519), - makeParams(1, 4, 3, 1, valEd25519), - makeParams(1, 2, 4, 1, valEd25519), - makeParams(2, 5, 7, 1, valEd25519), - makeParams(1, 7, 6, 1, valEd25519), - makeParams(9, 5, 4, 1, valEd25519), - makeParams(7, 8, 9, 1, valEd25519), - makeParams(4, 6, 5, 1, valEd25519), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), } hashes := make([][]byte, len(params)) @@ -100,21 +230,36 @@ func TestConsensusParamsHash(t *testing.T) { } func TestConsensusParamsUpdate(t *testing.T) { + prec := time.Second * 2 + msgDelay := time.Second * 4 + testCases := []struct { - params ConsensusParams + intialParams ConsensusParams updates *tmproto.ConsensusParams updatedParams ConsensusParams }{ // empty updates { - makeParams(1, 2, 3, 0, valEd25519), - &tmproto.ConsensusParams{}, - makeParams(1, 2, 3, 0, valEd25519), + intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &tmproto.ConsensusParams{}, + updatedParams: 
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + }, + { + // update synchrony params + intialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), + + updates: &tmproto.ConsensusParams{ + Synchrony: &tmproto.SynchronyParams{ + Precision: &prec, + MessageDelay: &msgDelay, + }, + }, + updatedParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}), }, // fine updates { - makeParams(1, 2, 3, 0, valEd25519), - &tmproto.ConsensusParams{ + intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, MaxGas: 200, @@ -128,11 +273,15 @@ func TestConsensusParamsUpdate(t *testing.T) { PubKeyTypes: valSecp256k1, }, }, - makeParams(100, 200, 300, 50, valSecp256k1), + updatedParams: makeParams(makeParamsArgs{ + blockBytes: 100, blockGas: 200, + evidenceAge: 300, + maxEvidenceBytes: 50, + pubkeyTypes: valSecp256k1}), }, { - makeParams(1, 2, 3, 0, valEd25519), - &tmproto.ConsensusParams{ + intialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, MaxGas: 200, @@ -145,17 +294,23 @@ func TestConsensusParamsUpdate(t *testing.T) { Validator: &tmproto.ValidatorParams{ PubKeyTypes: valSr25519, }, - }, makeParams(100, 200, 300, 50, valSr25519), + }, + updatedParams: makeParams(makeParamsArgs{ + blockBytes: 100, + blockGas: 200, + evidenceAge: 300, + maxEvidenceBytes: 50, + pubkeyTypes: valSr25519}), }, } for _, tc := range testCases { - assert.Equal(t, tc.updatedParams, tc.params.UpdateConsensusParams(tc.updates)) + assert.Equal(t, tc.updatedParams, tc.intialParams.UpdateConsensusParams(tc.updates)) } } func TestConsensusParamsUpdate_AppVersion(t *testing.T) { - params := makeParams(1, 2, 3, 0, valEd25519) + params := 
makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}) assert.EqualValues(t, 0, params.Version.AppVersion) @@ -167,14 +322,16 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) { func TestProto(t *testing.T) { params := []ConsensusParams{ - makeParams(4, 2, 3, 1, valEd25519), - makeParams(1, 4, 3, 1, valEd25519), - makeParams(1, 2, 4, 1, valEd25519), - makeParams(2, 5, 7, 1, valEd25519), - makeParams(1, 7, 6, 1, valEd25519), - makeParams(9, 5, 4, 1, valEd25519), - makeParams(7, 8, 9, 1, valEd25519), - makeParams(4, 6, 5, 1, valEd25519), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), + makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), } for i := range params { diff --git a/types/part_set.go b/types/part_set.go index 3a691083f7..9bf36279f7 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -2,15 +2,15 @@ package types import ( "bytes" + "encoding/json" "errors" "fmt" "io" + "sync" "github.com/tendermint/tendermint/crypto/merkle" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson 
"github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -151,7 +151,7 @@ type PartSet struct { total uint32 hash []byte - mtx tmsync.Mutex + mtx sync.Mutex parts []*Part partsBitArray *bits.BitArray count uint32 @@ -365,7 +365,7 @@ func (ps *PartSet) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return tmjson.Marshal(struct { + return json.Marshal(struct { CountTotal string `json:"count/total"` PartsBitArray *bits.BitArray `json:"parts_bit_array"` }{ diff --git a/types/part_set_test.go b/types/part_set_test.go index c6ea0f4525..af65ca8db0 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/assert" @@ -48,7 +48,7 @@ func TestBasicPartSet(t *testing.T) { // adding existing part added, err = partSet2.AddPart(partSet2.GetPart(0)) assert.False(t, added) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, partSet.Hash(), partSet2.Hash()) assert.EqualValues(t, nParts, partSet2.Total()) @@ -57,7 +57,7 @@ func TestBasicPartSet(t *testing.T) { // Reconstruct data, assert that they are equal. 
data2Reader := partSet2.GetReader() - data2, err := ioutil.ReadAll(data2Reader) + data2, err := io.ReadAll(data2Reader) require.NoError(t, err) assert.Equal(t, data, data2) diff --git a/types/priv_validator.go b/types/priv_validator.go index aa2c303dbd..5a9b27cb63 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -39,11 +39,11 @@ func (pvs PrivValidatorsByAddress) Len() int { } func (pvs PrivValidatorsByAddress) Less(i, j int) bool { - pvi, err := pvs[i].GetPubKey(context.Background()) + pvi, err := pvs[i].GetPubKey(context.TODO()) if err != nil { panic(err) } - pvj, err := pvs[j].GetPubKey(context.Background()) + pvj, err := pvs[j].GetPubKey(context.TODO()) if err != nil { panic(err) } @@ -114,8 +114,8 @@ func (pv MockPV) SignProposal(ctx context.Context, chainID string, proposal *tmp return nil } -func (pv MockPV) ExtractIntoValidator(votingPower int64) *Validator { - pubKey, _ := pv.GetPubKey(context.Background()) +func (pv MockPV) ExtractIntoValidator(ctx context.Context, votingPower int64) *Validator { + pubKey, _ := pv.GetPubKey(ctx) return &Validator{ Address: pubKey.Address(), PubKey: pubKey, @@ -125,7 +125,7 @@ func (pv MockPV) ExtractIntoValidator(votingPower int64) *Validator { // String returns a string representation of the MockPV. func (pv MockPV) String() string { - mpv, _ := pv.GetPubKey(context.Background()) // mockPV will never return an error, ignored here + mpv, _ := pv.GetPubKey(context.TODO()) // mockPV will never return an error, ignored here return fmt.Sprintf("MockPV{%v}", mpv.Address()) } diff --git a/types/proposal.go b/types/proposal.go index 450cc2a433..a4009eea25 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -3,6 +3,7 @@ package types import ( "errors" "fmt" + "math/bits" "time" "github.com/tendermint/tendermint/internal/libs/protoio" @@ -24,7 +25,7 @@ var ( // If POLRound >= 0, then BlockID corresponds to the block that is locked in POLRound. 
type Proposal struct { Type tmproto.SignedMsgType - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` // there can not be greater than 2_147_483_647 rounds POLRound int32 `json:"pol_round"` // -1 if null. BlockID BlockID `json:"block_id"` @@ -34,14 +35,14 @@ type Proposal struct { // NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1. -func NewProposal(height int64, round int32, polRound int32, blockID BlockID) *Proposal { +func NewProposal(height int64, round int32, polRound int32, blockID BlockID, ts time.Time) *Proposal { return &Proposal{ Type: tmproto.ProposalType, Height: height, Round: round, BlockID: blockID, POLRound: polRound, - Timestamp: tmtime.Now(), + Timestamp: tmtime.Canonical(ts), } } @@ -60,7 +61,7 @@ func (p *Proposal) ValidateBasic() error { return errors.New("negative POLRound (exception: -1)") } if err := p.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } // ValidateBasic above would pass even if the BlockID was empty: if !p.BlockID.IsComplete() { @@ -79,6 +80,43 @@ func (p *Proposal) ValidateBasic() error { return nil } +// IsTimely validates that the block timestamp is 'timely' according to the proposer-based timestamp algorithm. +// To evaluate if a block is timely, its timestamp is compared to the local time of the validator along with the +// configured Precision and MsgDelay parameters. 
+// Specifically, a proposed block timestamp is considered timely if it satisfies the following inequalities: +// +// localtime >= proposedBlockTime - Precision +// localtime <= proposedBlockTime + MsgDelay + Precision +// +// For more information on the meaning of 'timely', see the proposer-based timestamp specification: +// https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp +func (p *Proposal) IsTimely(recvTime time.Time, sp SynchronyParams, round int32) bool { + // The message delay values are scaled as rounds progress. + // Every 10 rounds, the message delay is doubled to allow consensus to + // proceed in the case that the chosen value was too small for the given network conditions. + // For more information and discussion on this mechanism, see the relevant github issue: + // https://github.com/tendermint/spec/issues/371 + maxShift := bits.LeadingZeros64(uint64(sp.MessageDelay)) - 1 + nShift := int((round / 10)) + + if nShift > maxShift { + // if the number of 'doublings' would overflow the size of the int, use the + // maximum instead. 
+ nShift = maxShift + } + msgDelay := sp.MessageDelay * time.Duration(1< 0 { vals.IncrementProposerPriority(1) diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 87008bb1c4..0963276264 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -81,7 +81,12 @@ func TestValidatorSetBasic(t *testing.T) { } func TestValidatorSetValidateBasic(t *testing.T) { - val, _ := randValidator(false, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + val, _, err := randValidator(ctx, false, 1) + require.NoError(t, err) + badVal := &Validator{} testCases := []struct { @@ -174,9 +179,7 @@ func BenchmarkValidatorSetCopy(b *testing.B) { pubKey := privKey.PubKey() val := NewValidator(pubKey, 10) err := vset.UpdateWithChangeSet([]*Validator{val}) - if err != nil { - panic("Failed to add validator") - } + require.NoError(b, err) } b.StartTimer() @@ -333,8 +336,8 @@ func TestProposerSelection3(t *testing.T) { } // serialize, deserialize, check proposer - b := vset.toBytes() - vset = vset.fromBytes(b) + b := vset.toBytes(t) + vset = vset.fromBytes(t, b) computed := vset.GetProposer() // findGetProposer() if i != 0 { @@ -382,18 +385,19 @@ func randModuloValidator(totalVotingPower int64) *Validator { return val } -func randValidator(randPower bool, minPower int64) (*Validator, PrivValidator) { +func randValidator(ctx context.Context, randPower bool, minPower int64) (*Validator, PrivValidator, error) { privVal := NewMockPV() votePower := minPower if randPower { votePower += int64(rand.Uint32()) } - pubKey, err := privVal.GetPubKey(context.Background()) + pubKey, err := privVal.GetPubKey(ctx) if err != nil { - panic(fmt.Errorf("could not retrieve pubkey %w", err)) + return nil, nil, err } + val := NewValidator(pubKey, votePower) - return val, privVal + return val, privVal, nil } func randModuloValidatorSet(numValidators int) *ValidatorSet { @@ -406,32 +410,23 @@ func randModuloValidatorSet(numValidators int) 
*ValidatorSet { return NewValidatorSet(validators) } -func (vals *ValidatorSet) toBytes() []byte { +func (vals *ValidatorSet) toBytes(t *testing.T) []byte { pbvs, err := vals.ToProto() - if err != nil { - panic(err) - } + require.NoError(t, err) bz, err := pbvs.Marshal() - if err != nil { - panic(err) - } + require.NoError(t, err) return bz } -func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet { +func (vals *ValidatorSet) fromBytes(t *testing.T, b []byte) *ValidatorSet { pbvs := new(tmproto.ValidatorSet) err := pbvs.Unmarshal(b) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - panic(err) - } + require.NoError(t, err) vs, err := ValidatorSetFromProto(pbvs) - if err != nil { - panic(err) - } + require.NoError(t, err) return vs } @@ -1144,7 +1139,7 @@ type testVSetCfg struct { func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { if nBase <= 0 || nAddMax < 0 { - panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax)) + t.Fatalf("bad parameters %v %v", nBase, nAddMax) } const maxPower = 1000 @@ -1293,11 +1288,14 @@ func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg tes } func TestNewValidatorSetFromExistingValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + size := 5 vals := make([]*Validator, size) for i := 0; i < size; i++ { pv := NewMockPV() - vals[i] = pv.ExtractIntoValidator(int64(i + 1)) + vals[i] = pv.ExtractIntoValidator(ctx, int64(i+1)) } valSet := NewValidatorSet(vals) valSet.IncrementProposerPriority(5) @@ -1409,14 +1407,18 @@ func TestSafeMul(t *testing.T) { } func TestValidatorSetProtoBuf(t *testing.T) { - valset, _ := randValidatorPrivValSet(10, 100) - valset2, _ := randValidatorPrivValSet(10, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + valset, _ := randValidatorPrivValSet(ctx, t, 10, 100) + valset2, _ := randValidatorPrivValSet(ctx, t, 10, 100) valset2.Validators[0] = &Validator{} - 
valset3, _ := randValidatorPrivValSet(10, 100) + valset3, _ := randValidatorPrivValSet(ctx, t, 10, 100) valset3.Proposer = nil - valset4, _ := randValidatorPrivValSet(10, 100) + valset4, _ := randValidatorPrivValSet(ctx, t, 10, 100) + valset4.Proposer = &Validator{} testCases := []struct { @@ -1521,7 +1523,10 @@ func BenchmarkUpdates(b *testing.B) { } } -func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { +func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { // nolint + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, n := range []int{1, 8, 64, 1024} { n := n var ( @@ -1532,9 +1537,9 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) { b.ReportAllocs() // generate n validators - voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5)) + voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5)) // create a commit with n validators - commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now()) + commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) for i := 0; i < b.N/n; i++ { @@ -1545,7 +1550,10 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { } } -func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { +func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { // nolint + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, n := range []int{1, 8, 64, 1024} { n := n var ( @@ -1556,9 +1564,10 @@ func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) { b.ReportAllocs() // generate n validators - voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5)) + voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5)) + // create a commit with n 
validators - commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now()) + commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) for i := 0; i < b.N/n; i++ { @@ -1570,6 +1579,9 @@ func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) { } func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, n := range []int{1, 8, 64, 1024} { n := n var ( @@ -1580,9 +1592,9 @@ func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) { b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) { b.ReportAllocs() // generate n validators - voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5)) + voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, int64(n*5)) // create a commit with n validators - commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now()) + commit, err := makeCommit(ctx, blockID, h, 0, voteSet, vals, time.Now()) require.NoError(b, err) for i := 0; i < b.N/n; i++ { @@ -1599,15 +1611,17 @@ func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) { // where each validator has a power of 50 // // EXPOSED FOR TESTING. 
-func deterministicValidatorSet() (*ValidatorSet, []PrivValidator) { +func deterministicValidatorSet(ctx context.Context, t *testing.T) (*ValidatorSet, []PrivValidator) { var ( valz = make([]*Validator, 10) privValidators = make([]PrivValidator, 10) ) + t.Helper() + for i := 0; i < 10; i++ { // val, privValidator := DeterministicValidator(ed25519.PrivKey([]byte(deterministicKeys[i]))) - val, privValidator := deterministicValidator(ed25519.GenPrivKeyFromSecret([]byte(fmt.Sprintf("key: %x", i)))) + val, privValidator := deterministicValidator(ctx, t, ed25519.GenPrivKeyFromSecret([]byte(fmt.Sprintf("key: %x", i)))) valz[i] = val privValidators[i] = privValidator } diff --git a/types/validator_test.go b/types/validator_test.go index 872dee8207..1e29787fdf 100644 --- a/types/validator_test.go +++ b/types/validator_test.go @@ -2,7 +2,6 @@ package types import ( "context" - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -11,7 +10,12 @@ import ( ) func TestValidatorProtoBuf(t *testing.T) { - val, _ := randValidator(true, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + val, _, err := randValidator(ctx, true, 100) + require.NoError(t, err) + testCases := []struct { msg string v1 *Validator @@ -42,8 +46,11 @@ func TestValidatorProtoBuf(t *testing.T) { } func TestValidatorValidateBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + priv := NewMockPV() - pubKey, _ := priv.GetPubKey(context.Background()) + pubKey, _ := priv.GetPubKey(ctx) testCases := []struct { val *Validator err bool @@ -105,14 +112,13 @@ func TestValidatorValidateBasic(t *testing.T) { // deterministicValidator returns a deterministic validator, useful for testing. 
// UNSTABLE -func deterministicValidator(key crypto.PrivKey) (*Validator, PrivValidator) { +func deterministicValidator(ctx context.Context, t *testing.T, key crypto.PrivKey) (*Validator, PrivValidator) { + t.Helper() privVal := NewMockPV() privVal.PrivKey = key var votePower int64 = 50 - pubKey, err := privVal.GetPubKey(context.TODO()) - if err != nil { - panic(fmt.Errorf("could not retrieve pubkey %w", err)) - } + pubKey, err := privVal.GetPubKey(ctx) + require.NoError(t, err, "could not retrieve pubkey") val := NewValidator(pubKey, votePower) return val, privVal } diff --git a/types/vote.go b/types/vote.go index e8105ad70f..0db1849a93 100644 --- a/types/vote.go +++ b/types/vote.go @@ -24,6 +24,7 @@ var ( ErrVoteInvalidBlockHash = errors.New("invalid block hash") ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature") ErrVoteNil = errors.New("nil vote") + ErrVoteInvalidExtension = errors.New("invalid vote extension") ) type ErrVoteConflictingVotes struct { @@ -45,17 +46,86 @@ func NewConflictingVoteError(vote1, vote2 *Vote) *ErrVoteConflictingVotes { // Address is hex bytes. type Address = crypto.Address +// VoteExtensionToSign is a subset of VoteExtension +// that is signed by the validators private key +type VoteExtensionToSign struct { + AppDataToSign []byte `json:"app_data_to_sign"` +} + +func (ext VoteExtensionToSign) ToProto() *tmproto.VoteExtensionToSign { + if ext.IsEmpty() { + return nil + } + return &tmproto.VoteExtensionToSign{ + AppDataToSign: ext.AppDataToSign, + } +} + +func VoteExtensionToSignFromProto(pext *tmproto.VoteExtensionToSign) VoteExtensionToSign { + if pext == nil { + return VoteExtensionToSign{} + } + return VoteExtensionToSign{ + AppDataToSign: pext.AppDataToSign, + } +} + +func (ext VoteExtensionToSign) IsEmpty() bool { + return len(ext.AppDataToSign) == 0 +} + +// BytesPacked returns a bytes-packed representation for +// debugging and human identification. 
This function should +// not be used for any logical operations. +func (ext VoteExtensionToSign) BytesPacked() []byte { + res := []byte{} + res = append(res, ext.AppDataToSign...) + return res +} + +// ToVoteExtension constructs a VoteExtension from a VoteExtensionToSign +func (ext VoteExtensionToSign) ToVoteExtension() VoteExtension { + return VoteExtension{ + AppDataToSign: ext.AppDataToSign, + } +} + +// VoteExtension is a set of data provided by the application +// that is additionally included in the vote +type VoteExtension struct { + AppDataToSign []byte `json:"app_data_to_sign"` + AppDataSelfAuthenticating []byte `json:"app_data_self_authenticating"` +} + +// ToSign constructs a VoteExtensionToSign from a VoteExtenstion +func (ext VoteExtension) ToSign() VoteExtensionToSign { + return VoteExtensionToSign{ + AppDataToSign: ext.AppDataToSign, + } +} + +// BytesPacked returns a bytes-packed representation for +// debugging and human identification. This function should +// not be used for any logical operations. +func (ext VoteExtension) BytesPacked() []byte { + res := []byte{} + res = append(res, ext.AppDataToSign...) + res = append(res, ext.AppDataSelfAuthenticating...) + return res +} + // Vote represents a prevote, precommit, or commit vote from validators for // consensus. type Vote struct { Type tmproto.SignedMsgType `json:"type"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` // assume there will not be greater than 2_147_483_647 rounds BlockID BlockID `json:"block_id"` // zero if vote is nil. Timestamp time.Time `json:"timestamp"` ValidatorAddress Address `json:"validator_address"` ValidatorIndex int32 `json:"validator_index"` Signature []byte `json:"signature"` + VoteExtension VoteExtension `json:"vote_extension"` } // CommitSig converts the Vote to a CommitSig. 
@@ -68,7 +138,7 @@ func (vote *Vote) CommitSig() CommitSig { switch { case vote.BlockID.IsComplete(): blockIDFlag = BlockIDFlagCommit - case vote.BlockID.IsZero(): + case vote.BlockID.IsNil(): blockIDFlag = BlockIDFlagNil default: panic(fmt.Sprintf("Invalid vote %v - expected BlockID to be either empty or complete", vote)) @@ -79,6 +149,7 @@ func (vote *Vote) CommitSig() CommitSig { ValidatorAddress: vote.ValidatorAddress, Timestamp: vote.Timestamp, Signature: vote.Signature, + VoteExtension: vote.VoteExtension.ToSign(), } } @@ -102,6 +173,7 @@ func VoteSignBytes(chainID string, vote *tmproto.Vote) []byte { func (vote *Vote) Copy() *Vote { voteCopy := *vote + voteCopy.VoteExtension = vote.VoteExtension.Copy() return &voteCopy } @@ -115,7 +187,8 @@ func (vote *Vote) Copy() *Vote { // 6. type string // 7. first 6 bytes of block hash // 8. first 6 bytes of signature -// 9. timestamp +// 9. first 6 bytes of vote extension +// 10. timestamp func (vote *Vote) String() string { if vote == nil { return nilVoteStr @@ -131,7 +204,7 @@ func (vote *Vote) String() string { panic("Unknown vote type") } - return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X @ %s}", + return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X @ %s}", vote.ValidatorIndex, tmbytes.Fingerprint(vote.ValidatorAddress), vote.Height, @@ -140,6 +213,7 @@ func (vote *Vote) String() string { typeString, tmbytes.Fingerprint(vote.BlockID.Hash), tmbytes.Fingerprint(vote.Signature), + tmbytes.Fingerprint(vote.VoteExtension.BytesPacked()), CanonicalTime(vote.Timestamp), ) } @@ -172,12 +246,12 @@ func (vote *Vote) ValidateBasic() error { // NOTE: Timestamp validation is subtle and handled elsewhere. 
if err := vote.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } // BlockID.ValidateBasic would not err if we for instance have an empty hash but a // non-empty PartsSetHeader: - if !vote.BlockID.IsZero() && !vote.BlockID.IsComplete() { + if !vote.BlockID.IsNil() && !vote.BlockID.IsComplete() { return fmt.Errorf("blockID must be either empty or complete, got: %v", vote.BlockID) } @@ -198,9 +272,40 @@ func (vote *Vote) ValidateBasic() error { return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize) } + // XXX: add length verification for vote extension? + return nil } +func (ext VoteExtension) Copy() VoteExtension { + res := VoteExtension{ + AppDataToSign: ext.AppDataToSign, + AppDataSelfAuthenticating: ext.AppDataSelfAuthenticating, + } + return res +} + +func (ext VoteExtension) IsEmpty() bool { + if len(ext.AppDataToSign) != 0 { + return false + } + if len(ext.AppDataSelfAuthenticating) != 0 { + return false + } + return true +} + +func (ext VoteExtension) ToProto() *tmproto.VoteExtension { + if ext.IsEmpty() { + return nil + } + + return &tmproto.VoteExtension{ + AppDataToSign: ext.AppDataToSign, + AppDataSelfAuthenticating: ext.AppDataSelfAuthenticating, + } +} + // ToProto converts the handwritten type to proto generated type // return type, nil if everything converts safely, otherwise nil, error func (vote *Vote) ToProto() *tmproto.Vote { @@ -217,7 +322,29 @@ func (vote *Vote) ToProto() *tmproto.Vote { ValidatorAddress: vote.ValidatorAddress, ValidatorIndex: vote.ValidatorIndex, Signature: vote.Signature, + VoteExtension: vote.VoteExtension.ToProto(), + } +} + +func VotesToProto(votes []*Vote) (res []*tmproto.Vote) { + if votes == nil { + return nil + } + + res = make([]*tmproto.Vote, len(votes)) + for i, vote := range votes { + res[i] = vote.ToProto() + } + return +} + +func VoteExtensionFromProto(pext *tmproto.VoteExtension) VoteExtension { + ext := 
VoteExtension{} + if pext != nil { + ext.AppDataToSign = pext.AppDataToSign + ext.AppDataSelfAuthenticating = pext.AppDataSelfAuthenticating } + return ext } // FromProto converts a proto generetad type to a handwritten type @@ -241,6 +368,7 @@ func VoteFromProto(pv *tmproto.Vote) (*Vote, error) { vote.ValidatorAddress = pv.ValidatorAddress vote.ValidatorIndex = pv.ValidatorIndex vote.Signature = pv.Signature + vote.VoteExtension = VoteExtensionFromProto(pv.VoteExtension) return vote, vote.ValidateBasic() } diff --git a/types/vote_set.go b/types/vote_set.go index b064f2c07c..bb675e1102 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -2,12 +2,12 @@ package types import ( "bytes" + "encoding/json" "fmt" "strings" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" - tmjson "github.com/tendermint/tendermint/libs/json" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -65,7 +65,7 @@ type VoteSet struct { signedMsgType tmproto.SignedMsgType valSet *ValidatorSet - mtx tmsync.Mutex + mtx sync.Mutex votesBitArray *bits.BitArray votes []*Vote // Primary votes to share sum int64 // Sum of voting power for seen votes, discounting conflicts @@ -226,6 +226,10 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok return nil, false } +func (voteSet *VoteSet) GetVotes() []*Vote { + return voteSet.votes +} + // Assumes signature is valid. // If conflicting vote exists, returns it. func (voteSet *VoteSet) addVerifiedVote( @@ -372,9 +376,26 @@ func (voteSet *VoteSet) GetByIndex(valIndex int32) *Vote { } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() + if int(valIndex) >= len(voteSet.votes) { + return nil + } return voteSet.votes[valIndex] } +// List returns a copy of the list of votes stored by the VoteSet. 
+func (voteSet *VoteSet) List() []Vote { + if voteSet == nil || voteSet.votes == nil { + return nil + } + votes := make([]Vote, 0, len(voteSet.votes)) + for i := range voteSet.votes { + if voteSet.votes[i] != nil { + votes = append(votes, *voteSet.votes[i]) + } + } + return votes +} + func (voteSet *VoteSet) GetByAddress(address []byte) *Vote { if voteSet == nil { return nil @@ -420,6 +441,9 @@ func (voteSet *VoteSet) HasTwoThirdsAny() bool { } func (voteSet *VoteSet) HasAll() bool { + if voteSet == nil { + return false + } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() return voteSet.sum == voteSet.valSet.TotalVotingPower() @@ -492,7 +516,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string { func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - return tmjson.Marshal(VoteSetJSON{ + return json.Marshal(VoteSetJSON{ voteSet.voteStrings(), voteSet.bitArrayString(), voteSet.peerMaj23s, @@ -610,6 +634,7 @@ func (voteSet *VoteSet) MakeCommit() *Commit { if commitSig.ForBlock() && !v.BlockID.Equals(*voteSet.maj23) { commitSig = NewCommitSigAbsent() } + commitSigs[i] = commitSig } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 542665e2d8..8baa74172f 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -16,18 +16,22 @@ import ( ) func TestVoteSet_AddVote_Good(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, 1) + val0 := privValidators[0] - val0p, err := val0.GetPubKey(context.Background()) + val0p, err := val0.GetPubKey(ctx) require.NoError(t, err) val0Addr := val0p.Address() assert.Nil(t, voteSet.GetByAddress(val0Addr)) assert.False(t, voteSet.BitArray().GetIndex(0)) blockID, ok := voteSet.TwoThirdsMajority() - 
assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") vote := &Vote{ ValidatorAddress: val0Addr, @@ -38,18 +42,21 @@ func TestVoteSet_AddVote_Good(t *testing.T) { Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } - _, err = signAddVote(val0, vote, voteSet) + _, err = signAddVote(ctx, val0, vote, voteSet) require.NoError(t, err) assert.NotNil(t, voteSet.GetByAddress(val0Addr)) assert.True(t, voteSet.BitArray().GetIndex(0)) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") } func TestVoteSet_AddVote_Bad(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, @@ -63,11 +70,11 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val0 votes for nil. { - pubKey, err := privValidators[0].GetPubKey(context.Background()) + pubKey, err := privValidators[0].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) - added, err := signAddVote(privValidators[0], vote, voteSet) + added, err := signAddVote(ctx, privValidators[0], vote, voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -75,11 +82,11 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val0 votes again for some block. 
{ - pubKey, err := privValidators[0].GetPubKey(context.Background()) + pubKey, err := privValidators[0].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, conflicting vote.") } @@ -87,11 +94,11 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val1 votes on another height { - pubKey, err := privValidators[1].GetPubKey(context.Background()) + pubKey, err := privValidators[1].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 1) - added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet) + added, err := signAddVote(ctx, privValidators[1], withHeight(vote, height+1), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong height") } @@ -99,11 +106,11 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val2 votes on another round { - pubKey, err := privValidators[2].GetPubKey(context.Background()) + pubKey, err := privValidators[2].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 2) - added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet) + added, err := signAddVote(ctx, privValidators[2], withRound(vote, round+1), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong round") } @@ -111,11 +118,11 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val3 votes of another type. 
{ - pubKey, err := privValidators[3].GetPubKey(context.Background()) + pubKey, err := privValidators[3].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 3) - added, err := signAddVote(privValidators[3], withType(vote, byte(tmproto.PrecommitType)), voteSet) + added, err := signAddVote(ctx, privValidators[3], withType(vote, byte(tmproto.PrecommitType)), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong type") } @@ -123,8 +130,11 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { } func TestVoteSet_2_3Majority(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in @@ -137,44 +147,47 @@ func TestVoteSet_2_3Majority(t *testing.T) { } // 6 out of 10 voted for nil. 
for i := int32(0); i < 6; i++ { - pubKey, err := privValidators[i].GetPubKey(context.Background()) + pubKey, err := privValidators[i].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, i) - _, err = signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(ctx, privValidators[i], vote, voteSet) require.NoError(t, err) } blockID, ok := voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") // 7th validator voted for some blockhash { - pubKey, err := privValidators[6].GetPubKey(context.Background()) + pubKey, err := privValidators[6].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 6) - _, err = signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(ctx, privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") } // 8th validator voted for nil. 
{ - pubKey, err := privValidators[7].GetPubKey(context.Background()) + pubKey, err := privValidators[7].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 7) - _, err = signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(ctx, privValidators[7], vote, voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.True(t, ok || blockID.IsZero(), "there should be 2/3 majority for nil") + assert.True(t, ok || blockID.IsNil(), "there should be 2/3 majority for nil") } } func TestVoteSet_2_3MajorityRedux(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 100, 1) + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 100, 1) blockHash := crypto.CRandBytes(32) blockPartsTotal := uint32(123) @@ -192,78 +205,78 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 66 out of 100 voted for nil. 
for i := int32(0); i < 66; i++ { - pubKey, err := privValidators[i].GetPubKey(context.Background()) + pubKey, err := privValidators[i].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, i) - _, err = signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(ctx, privValidators[i], vote, voteSet) require.NoError(t, err) } blockID, ok := voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") // 67th validator voted for nil { - pubKey, err := privValidators[66].GetPubKey(context.Background()) + pubKey, err := privValidators[66].GetPubKey(ctx) require.NoError(t, err) adrr := pubKey.Address() vote := withValidator(voteProto, adrr, 66) - _, err = signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + _, err = signAddVote(ctx, privValidators[66], withBlockHash(vote, nil), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added was nil") } // 68th validator voted for a different BlockParts PartSetHeader { - pubKey, err := privValidators[67].GetPubKey(context.Background()) + pubKey, err := privValidators[67].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 67) blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - _, err = signAddVote(privValidators[67], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + _, err = signAddVote(ctx, privValidators[67], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added had different PartSetHeader Hash") } // 69th validator voted for 
different BlockParts Total { - pubKey, err := privValidators[68].GetPubKey(context.Background()) + pubKey, err := privValidators[68].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 68) blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartSetHeader.Hash} - _, err = signAddVote(privValidators[68], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + _, err = signAddVote(ctx, privValidators[68], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added had different PartSetHeader Total") } // 70th validator voted for different BlockHash { - pubKey, err := privValidators[69].GetPubKey(context.Background()) + pubKey, err := privValidators[69].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 69) - _, err = signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(ctx, privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added had different BlockHash") } // 71st validator voted for the right BlockHash & BlockPartSetHeader { - pubKey, err := privValidators[70].GetPubKey(context.Background()) + pubKey, err := privValidators[70].GetPubKey(ctx) require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 70) - _, err = signAddVote(privValidators[70], vote, voteSet) + _, err = signAddVote(ctx, privValidators[70], vote, voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() assert.True(t, ok && blockID.Equals(BlockID{blockHash, blockPartSetHeader}), @@ -272,8 
+285,12 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { } func TestVoteSet_Conflicts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 4, 1) + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 4, 1) + blockHash1 := tmrand.Bytes(32) blockHash2 := tmrand.Bytes(32) @@ -287,14 +304,14 @@ func TestVoteSet_Conflicts(t *testing.T) { BlockID: BlockID{nil, PartSetHeader{}}, } - val0, err := privValidators[0].GetPubKey(context.Background()) + val0, err := privValidators[0].GetPubKey(ctx) require.NoError(t, err) val0Addr := val0.Address() // val0 votes for nil. { vote := withValidator(voteProto, val0Addr, 0) - added, err := signAddVote(privValidators[0], vote, voteSet) + added, err := signAddVote(ctx, privValidators[0], vote, voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -303,7 +320,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val0 votes again for blockHash1. { vote := withValidator(voteProto, val0Addr, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, blockHash1), voteSet) assert.False(t, added, "conflicting vote") assert.Error(t, err, "conflicting vote") } @@ -315,7 +332,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val0 votes again for blockHash1. { vote := withValidator(voteProto, val0Addr, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, blockHash1), voteSet) assert.True(t, added, "called SetPeerMaj23()") assert.Error(t, err, "conflicting vote") } @@ -327,18 +344,18 @@ func TestVoteSet_Conflicts(t *testing.T) { // val0 votes again for blockHash1. 
{ vote := withValidator(voteProto, val0Addr, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, blockHash2), voteSet) assert.False(t, added, "duplicate SetPeerMaj23() from peerA") assert.Error(t, err, "conflicting vote") } // val1 votes for blockHash1. { - pv, err := privValidators[1].GetPubKey(context.Background()) + pv, err := privValidators[1].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 1) - added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[1], withBlockHash(vote, blockHash1), voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -354,11 +371,11 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash2. { - pv, err := privValidators[2].GetPubKey(context.Background()) + pv, err := privValidators[2].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 2) - added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet) + added, err := signAddVote(ctx, privValidators[2], withBlockHash(vote, blockHash2), voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -378,11 +395,11 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash1. 
{ - pv, err := privValidators[2].GetPubKey(context.Background()) + pv, err := privValidators[2].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 2) - added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[2], withBlockHash(vote, blockHash1), voteSet) assert.True(t, added) assert.Error(t, err, "conflicting vote") } @@ -401,8 +418,12 @@ func TestVoteSet_Conflicts(t *testing.T) { } func TestVoteSet_MakeCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrecommitType, 10, 1) + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrecommitType, 10, 1) + blockHash, blockPartSetHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} voteProto := &Vote{ @@ -417,11 +438,11 @@ func TestVoteSet_MakeCommit(t *testing.T) { // 6 out of 10 voted for some block. for i := int32(0); i < 6; i++ { - pv, err := privValidators[i].GetPubKey(context.Background()) + pv, err := privValidators[i].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, i) - _, err = signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(ctx, privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -432,36 +453,36 @@ func TestVoteSet_MakeCommit(t *testing.T) { // 7th voted for some other block. 
{ - pv, err := privValidators[6].GetPubKey(context.Background()) + pv, err := privValidators[6].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 6) vote = withBlockHash(vote, tmrand.Bytes(32)) vote = withBlockPartSetHeader(vote, PartSetHeader{123, tmrand.Bytes(32)}) - _, err = signAddVote(privValidators[6], vote, voteSet) + _, err = signAddVote(ctx, privValidators[6], vote, voteSet) require.NoError(t, err) } // The 8th voted like everyone else. { - pv, err := privValidators[7].GetPubKey(context.Background()) + pv, err := privValidators[7].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 7) - _, err = signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(ctx, privValidators[7], vote, voteSet) require.NoError(t, err) } // The 9th voted for nil. { - pv, err := privValidators[8].GetPubKey(context.Background()) + pv, err := privValidators[8].GetPubKey(ctx) assert.NoError(t, err) addr := pv.Address() vote := withValidator(voteProto, addr, 8) vote.BlockID = BlockID{} - _, err = signAddVote(privValidators[8], vote, voteSet) + _, err = signAddVote(ctx, privValidators[8], vote, voteSet) require.NoError(t, err) } @@ -472,40 +493,49 @@ func TestVoteSet_MakeCommit(t *testing.T) { // Ensure that Commit is good. 
if err := commit.ValidateBasic(); err != nil { - t.Errorf("error in Commit.ValidateBasic(): %v", err) + t.Errorf("error in Commit.ValidateBasic(): %v", err) } } // NOTE: privValidators are in order func randVoteSet( + ctx context.Context, + t testing.TB, height int64, round int32, signedMsgType tmproto.SignedMsgType, numValidators int, votingPower int64, ) (*VoteSet, *ValidatorSet, []PrivValidator) { - valSet, privValidators := randValidatorPrivValSet(numValidators, votingPower) + t.Helper() + valSet, privValidators := randValidatorPrivValSet(ctx, t, numValidators, votingPower) + return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators } func deterministicVoteSet( + ctx context.Context, + t *testing.T, height int64, round int32, signedMsgType tmproto.SignedMsgType, votingPower int64, ) (*VoteSet, *ValidatorSet, []PrivValidator) { - valSet, privValidators := deterministicValidatorSet() + t.Helper() + valSet, privValidators := deterministicValidatorSet(ctx, t) + return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators } -func randValidatorPrivValSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { +func randValidatorPrivValSet(ctx context.Context, t testing.TB, numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { var ( valz = make([]*Validator, numValidators) privValidators = make([]PrivValidator, numValidators) ) for i := 0; i < numValidators; i++ { - val, privValidator := randValidator(false, votingPower) + val, privValidator, err := randValidator(ctx, false, votingPower) + require.NoError(t, err) + valz[i] = val privValidators[i] = privValidator } diff --git a/types/vote_test.go b/types/vote_test.go index bfd1c41644..4a852d81f4 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -16,19 +16,20 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) -func examplePrevote() *Vote { - return 
exampleVote(byte(tmproto.PrevoteType)) +func examplePrevote(t *testing.T) *Vote { + t.Helper() + return exampleVote(t, byte(tmproto.PrevoteType)) } -func examplePrecommit() *Vote { - return exampleVote(byte(tmproto.PrecommitType)) +func examplePrecommit(t testing.TB) *Vote { + t.Helper() + return exampleVote(t, byte(tmproto.PrecommitType)) } -func exampleVote(t byte) *Vote { +func exampleVote(tb testing.TB, t byte) *Vote { + tb.Helper() var stamp, err = time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z") - if err != nil { - panic(err) - } + require.NoError(tb, err) return &Vote{ Type: tmproto.SignedMsgType(t), @@ -46,9 +47,8 @@ func exampleVote(t byte) *Vote { ValidatorIndex: 56789, } } - func TestVoteSignable(t *testing.T) { - vote := examplePrecommit() + vote := examplePrecommit(t) v := vote.ToProto() signBytes := VoteSignBytes("test_chain_id", v) pb := CanonicalizeVote("test_chain_id", v) @@ -128,6 +128,33 @@ func TestVoteSignBytesTestVectors(t *testing.T) { 0x32, 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID }, + // containing vote extension + 5: { + "test_chain_id", &Vote{Height: 1, Round: 1, VoteExtension: VoteExtension{ + AppDataToSign: []byte("signed"), + AppDataSelfAuthenticating: []byte("auth"), + }}, + []byte{ + 0x38, // length + 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round + // remaining fields: + 0x2a, // (field_number << 3) | wire_type + 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp + // (field_number << 3) | wire_type + 0x32, + 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, // chainID + // (field_number << 3) | wire_type + 0x3a, + 0x8, // length + 0xa, // (field_number << 3) | wire_type + 0x6, // length + 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, // AppDataSigned + // SelfAuthenticating data is 
excluded on signing + }, // chainID + }, } for i, tc := range tests { v := tc.vote.ToProto() @@ -148,16 +175,19 @@ func TestVoteProposalNotEq(t *testing.T) { } func TestVoteVerifySignature(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + privVal := NewMockPV() - pubkey, err := privVal.GetPubKey(context.Background()) + pubkey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - vote := examplePrecommit() + vote := examplePrecommit(t) v := vote.ToProto() signBytes := VoteSignBytes("test_chain_id", v) // sign it - err = privVal.SignVote(context.Background(), "test_chain_id", v) + err = privVal.SignVote(ctx, "test_chain_id", v) require.NoError(t, err) // verify the same vote @@ -200,11 +230,14 @@ func TestIsVoteTypeValid(t *testing.T) { } func TestVoteVerify(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + privVal := NewMockPV() - pubkey, err := privVal.GetPubKey(context.Background()) + pubkey, err := privVal.GetPubKey(ctx) require.NoError(t, err) - vote := examplePrevote() + vote := examplePrevote(t) vote.ValidatorAddress = pubkey.Address() err = vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey()) @@ -219,14 +252,14 @@ func TestVoteVerify(t *testing.T) { } func TestVoteString(t *testing.T) { - str := examplePrecommit().String() - expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests + str := examplePrecommit(t).String() + expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests if str != expected { t.Errorf("got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str) } - str2 := examplePrevote().String() - expected = `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests + str2 := examplePrevote(t).String() + expected = `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests if str2 != expected { t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str2) } @@ -254,9 +287,12 @@ func TestVoteValidateBasic(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.testName, func(t *testing.T) { - vote := examplePrecommit() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vote := examplePrecommit(t) v := vote.ToProto() - err := privVal.SignVote(context.Background(), "test_chain_id", v) + err := privVal.SignVote(ctx, "test_chain_id", v) vote.Signature = v.Signature require.NoError(t, err) tc.malleateVote(vote) @@ -266,10 +302,13 @@ func TestVoteValidateBasic(t *testing.T) { } func TestVoteProtobuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + privVal := NewMockPV() - vote := examplePrecommit() + vote := examplePrecommit(t) v := vote.ToProto() - err := privVal.SignVote(context.Background(), "test_chain_id", v) + err := privVal.SignVote(ctx, "test_chain_id", v) vote.Signature = v.Signature require.NoError(t, err) @@ -294,3 +333,58 @@ func TestVoteProtobuf(t *testing.T) { } } } + +var sink interface{} + +func getSampleCommit(ctx context.Context, t testing.TB) *Commit { + t.Helper() + + lastID := makeBlockIDRandom() + voteSet, _, vals := randVoteSet(ctx, t, 2, 1, tmproto.PrecommitType, 10, 1) + commit, err := makeCommit(ctx, lastID, 2, 1, voteSet, vals, time.Now()) + + require.NoError(t, err) + + return commit +} + +func BenchmarkVoteSignBytes(b *testing.B) { + 
protoVote := examplePrecommit(b).ToProto() + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + sink = VoteSignBytes("test_chain_id", protoVote) + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) +} + +func BenchmarkCommitVoteSignBytes(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sampleCommit := getSampleCommit(ctx, b) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + for index := range sampleCommit.Signatures { + sink = sampleCommit.VoteSignBytes("test_chain_id", int32(index)) + } + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) +} diff --git a/version/version.go b/version/version.go index e42952f773..483fca031a 100644 --- a/version/version.go +++ b/version/version.go @@ -28,8 +28,8 @@ var ( ) type Consensus struct { - Block uint64 `json:"block"` - App uint64 `json:"app"` + Block uint64 `json:"block,string"` + App uint64 `json:"app,string"` } func (c Consensus) ToProto() tmversion.Consensus {